2024-11-23 17:05:28,128 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad 2024-11-23 17:05:28,165 main DEBUG Took 0.016444 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-23 17:05:28,166 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-23 17:05:28,166 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-23 17:05:28,168 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-23 17:05:28,170 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 17:05:28,181 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-23 17:05:28,215 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 17:05:28,217 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 17:05:28,218 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 17:05:28,225 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 17:05:28,225 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 17:05:28,226 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 17:05:28,227 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 17:05:28,227 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 17:05:28,228 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 17:05:28,228 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 17:05:28,229 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 17:05:28,230 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 17:05:28,232 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 17:05:28,233 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-23 17:05:28,234 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 17:05:28,235 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 17:05:28,236 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 17:05:28,236 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 17:05:28,237 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 17:05:28,238 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 17:05:28,239 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 17:05:28,240 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 17:05:28,242 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 17:05:28,244 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 17:05:28,245 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 17:05:28,246 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-23 17:05:28,248 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 17:05:28,250 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-23 17:05:28,253 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-23 17:05:28,261 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-23 17:05:28,264 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-23 17:05:28,264 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-23 17:05:28,277 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-23 17:05:28,281 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-23 17:05:28,283 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-23 17:05:28,283 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-23 17:05:28,284 main DEBUG createAppenders(={Console}) 2024-11-23 17:05:28,285 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad initialized 2024-11-23 17:05:28,289 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad 2024-11-23 17:05:28,290 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad OK. 2024-11-23 17:05:28,290 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-23 17:05:28,291 main DEBUG OutputStream closed 2024-11-23 17:05:28,291 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-23 17:05:28,291 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-23 17:05:28,292 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@7c711375 OK 2024-11-23 17:05:28,382 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-23 17:05:28,385 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-23 17:05:28,386 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-23 17:05:28,387 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-23 17:05:28,388 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-23 17:05:28,388 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-23 17:05:28,389 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-23 17:05:28,389 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-23 17:05:28,390 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-23 17:05:28,390 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-23 17:05:28,390 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-23 17:05:28,391 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-23 17:05:28,391 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-23 17:05:28,392 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-23 17:05:28,392 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-23 17:05:28,393 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-23 17:05:28,393 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-23 17:05:28,394 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-23 17:05:28,397 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-23 17:05:28,398 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@554e218) with optional ClassLoader: null 2024-11-23 17:05:28,398 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-23 17:05:28,399 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@554e218] started OK. 2024-11-23T17:05:28,420 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins 2024-11-23 17:05:28,424 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-23 17:05:28,425 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-23T17:05:28,850 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204 2024-11-23T17:05:28,851 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestMobExportSnapshot timeout: 13 mins 2024-11-23T17:05:28,851 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestMobSecureExportSnapshot timeout: 13 mins 2024-11-23T17:05:28,910 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable 2024-11-23T17:05:29,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-23T17:05:29,234 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534, deleteOnExit=true 2024-11-23T17:05:29,235 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-23T17:05:29,236 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/test.cache.data in system properties and HBase conf 2024-11-23T17:05:29,236 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop.tmp.dir in system properties and HBase conf 2024-11-23T17:05:29,237 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop.log.dir in system properties and HBase conf 2024-11-23T17:05:29,238 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-23T17:05:29,238 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-23T17:05:29,239 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-23T17:05:29,344 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-23T17:05:29,369 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-23T17:05:29,370 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-23T17:05:29,380 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-23T17:05:29,381 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T17:05:29,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-23T17:05:29,388 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-23T17:05:29,390 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T17:05:29,390 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T17:05:29,391 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-23T17:05:29,391 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/nfs.dump.dir in system properties and HBase conf 2024-11-23T17:05:29,392 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/java.io.tmpdir in system properties and HBase conf 2024-11-23T17:05:29,393 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T17:05:29,393 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-23T17:05:29,394 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-23T17:05:30,541 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-23T17:05:30,651 INFO [Time-limited test {}] log.Log(170): Logging initialized @3639ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-23T17:05:30,760 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T17:05:30,853 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T17:05:30,893 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T17:05:30,893 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T17:05:30,895 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T17:05:30,912 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T17:05:30,916 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@186b7ee9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop.log.dir/,AVAILABLE} 2024-11-23T17:05:30,917 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@af8acfe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T17:05:31,152 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5996a1b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/java.io.tmpdir/jetty-localhost-39301-hadoop-hdfs-3_4_1-tests_jar-_-any-10445591714185171391/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T17:05:31,176 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@649f03da{HTTP/1.1, (http/1.1)}{localhost:39301} 2024-11-23T17:05:31,176 INFO [Time-limited test {}] server.Server(415): Started @4165ms 2024-11-23T17:05:31,660 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T17:05:31,671 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T17:05:31,678 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T17:05:31,678 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T17:05:31,679 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T17:05:31,681 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ab3224e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop.log.dir/,AVAILABLE} 2024-11-23T17:05:31,682 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@726dcf3b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T17:05:31,838 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@64986ec5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/java.io.tmpdir/jetty-localhost-44965-hadoop-hdfs-3_4_1-tests_jar-_-any-14731317452165530218/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T17:05:31,839 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7147894f{HTTP/1.1, (http/1.1)}{localhost:44965} 2024-11-23T17:05:31,839 INFO [Time-limited test {}] server.Server(415): Started @4828ms 2024-11-23T17:05:31,911 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T17:05:32,074 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T17:05:32,081 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T17:05:32,092 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T17:05:32,092 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T17:05:32,093 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T17:05:32,094 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16f22913{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop.log.dir/,AVAILABLE} 2024-11-23T17:05:32,094 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@76645274{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T17:05:32,247 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6b9a68a9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/java.io.tmpdir/jetty-localhost-38317-hadoop-hdfs-3_4_1-tests_jar-_-any-13850273831352466515/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T17:05:32,248 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@56952abb{HTTP/1.1, (http/1.1)}{localhost:38317} 2024-11-23T17:05:32,250 INFO [Time-limited test {}] server.Server(415): Started @5239ms 2024-11-23T17:05:32,255 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T17:05:32,401 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T17:05:32,415 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T17:05:32,418 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T17:05:32,418 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T17:05:32,418 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T17:05:32,420 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6268d40{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop.log.dir/,AVAILABLE} 2024-11-23T17:05:32,420 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59eae107{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T17:05:32,581 WARN [Thread-106 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data1/current/BP-1942416587-172.17.0.2-1732381530223/current, will proceed with Du for space computation calculation, 2024-11-23T17:05:32,581 WARN [Thread-105 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data3/current/BP-1942416587-172.17.0.2-1732381530223/current, will proceed with Du for space computation calculation, 2024-11-23T17:05:32,584 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data2/current/BP-1942416587-172.17.0.2-1732381530223/current, will proceed with Du for space computation calculation, 2024-11-23T17:05:32,589 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data4/current/BP-1942416587-172.17.0.2-1732381530223/current, will proceed with Du for space computation calculation, 2024-11-23T17:05:32,627 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@367f3488{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/java.io.tmpdir/jetty-localhost-44953-hadoop-hdfs-3_4_1-tests_jar-_-any-11469677506471446466/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T17:05:32,628 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1c0d7560{HTTP/1.1, 
(http/1.1)}{localhost:44953} 2024-11-23T17:05:32,628 INFO [Time-limited test {}] server.Server(415): Started @5617ms 2024-11-23T17:05:32,631 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T17:05:32,693 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-23T17:05:32,707 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-23T17:05:32,813 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb72a501941d2afec with lease ID 0x66c57f5d2ff9dd0a: Processing first storage report for DS-060ddb2c-4779-4744-a2e0-1bc04f2e067d from datanode DatanodeRegistration(127.0.0.1:44705, datanodeUuid=dbe6d323-d458-4fd8-9c0e-1b05c6e6f31c, infoPort=35159, infoSecurePort=0, ipcPort=39753, storageInfo=lv=-57;cid=testClusterID;nsid=187563944;c=1732381530223) 2024-11-23T17:05:32,815 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb72a501941d2afec with lease ID 0x66c57f5d2ff9dd0a: from storage DS-060ddb2c-4779-4744-a2e0-1bc04f2e067d node DatanodeRegistration(127.0.0.1:44705, datanodeUuid=dbe6d323-d458-4fd8-9c0e-1b05c6e6f31c, infoPort=35159, infoSecurePort=0, ipcPort=39753, storageInfo=lv=-57;cid=testClusterID;nsid=187563944;c=1732381530223), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-23T17:05:32,815 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf8579966c905f7f6 with lease ID 0x66c57f5d2ff9dd09: Processing first storage report for DS-162d84dd-1d36-456f-980d-af608c2736b0 from datanode DatanodeRegistration(127.0.0.1:38951, datanodeUuid=ea1be133-7f2f-4b36-bc4f-bacb5152283b, infoPort=40223, infoSecurePort=0, ipcPort=44465, storageInfo=lv=-57;cid=testClusterID;nsid=187563944;c=1732381530223) 2024-11-23T17:05:32,816 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf8579966c905f7f6 with lease ID 0x66c57f5d2ff9dd09: from storage DS-162d84dd-1d36-456f-980d-af608c2736b0 node DatanodeRegistration(127.0.0.1:38951, datanodeUuid=ea1be133-7f2f-4b36-bc4f-bacb5152283b, infoPort=40223, infoSecurePort=0, ipcPort=44465, storageInfo=lv=-57;cid=testClusterID;nsid=187563944;c=1732381530223), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-23T17:05:32,816 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb72a501941d2afec with lease ID 0x66c57f5d2ff9dd0a: Processing first storage report for DS-83dc9630-18ee-482d-98c4-b79f7f0029c6 from datanode DatanodeRegistration(127.0.0.1:44705, datanodeUuid=dbe6d323-d458-4fd8-9c0e-1b05c6e6f31c, infoPort=35159, infoSecurePort=0, ipcPort=39753, storageInfo=lv=-57;cid=testClusterID;nsid=187563944;c=1732381530223) 2024-11-23T17:05:32,816 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb72a501941d2afec with lease ID 0x66c57f5d2ff9dd0a: from storage DS-83dc9630-18ee-482d-98c4-b79f7f0029c6 node DatanodeRegistration(127.0.0.1:44705, datanodeUuid=dbe6d323-d458-4fd8-9c0e-1b05c6e6f31c, infoPort=35159, infoSecurePort=0, ipcPort=39753, storageInfo=lv=-57;cid=testClusterID;nsid=187563944;c=1732381530223), blocks: 0, hasStaleStorage: false, 
processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T17:05:32,817 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf8579966c905f7f6 with lease ID 0x66c57f5d2ff9dd09: Processing first storage report for DS-b186a1f7-3bd0-4270-b4a2-a09bac2a3cff from datanode DatanodeRegistration(127.0.0.1:38951, datanodeUuid=ea1be133-7f2f-4b36-bc4f-bacb5152283b, infoPort=40223, infoSecurePort=0, ipcPort=44465, storageInfo=lv=-57;cid=testClusterID;nsid=187563944;c=1732381530223) 2024-11-23T17:05:32,817 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf8579966c905f7f6 with lease ID 0x66c57f5d2ff9dd09: from storage DS-b186a1f7-3bd0-4270-b4a2-a09bac2a3cff node DatanodeRegistration(127.0.0.1:38951, datanodeUuid=ea1be133-7f2f-4b36-bc4f-bacb5152283b, infoPort=40223, infoSecurePort=0, ipcPort=44465, storageInfo=lv=-57;cid=testClusterID;nsid=187563944;c=1732381530223), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-23T17:05:32,870 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data5/current/BP-1942416587-172.17.0.2-1732381530223/current, will proceed with Du for space computation calculation, 2024-11-23T17:05:32,881 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data6/current/BP-1942416587-172.17.0.2-1732381530223/current, will proceed with Du for space computation calculation, 2024-11-23T17:05:32,937 WARN [Thread-123 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-23T17:05:32,949 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf7fea21b1d20a48c with lease ID 0x66c57f5d2ff9dd0b: Processing first storage report for DS-7c68cd94-8a4b-4f5f-8d5b-d907b4cb7b32 from datanode DatanodeRegistration(127.0.0.1:32895, datanodeUuid=f9f70e51-565f-4cc4-9aa8-ecb1ccfe8fa1, infoPort=46009, infoSecurePort=0, ipcPort=45243, storageInfo=lv=-57;cid=testClusterID;nsid=187563944;c=1732381530223) 2024-11-23T17:05:32,950 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf7fea21b1d20a48c with lease ID 0x66c57f5d2ff9dd0b: from storage DS-7c68cd94-8a4b-4f5f-8d5b-d907b4cb7b32 node DatanodeRegistration(127.0.0.1:32895, datanodeUuid=f9f70e51-565f-4cc4-9aa8-ecb1ccfe8fa1, infoPort=46009, infoSecurePort=0, ipcPort=45243, storageInfo=lv=-57;cid=testClusterID;nsid=187563944;c=1732381530223), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-23T17:05:32,950 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf7fea21b1d20a48c with lease ID 0x66c57f5d2ff9dd0b: Processing first storage report for DS-f4fc3c6c-5161-46cb-93ae-059b5f020c9d from datanode DatanodeRegistration(127.0.0.1:32895, datanodeUuid=f9f70e51-565f-4cc4-9aa8-ecb1ccfe8fa1, infoPort=46009, infoSecurePort=0, ipcPort=45243, storageInfo=lv=-57;cid=testClusterID;nsid=187563944;c=1732381530223) 2024-11-23T17:05:32,950 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf7fea21b1d20a48c with lease ID 0x66c57f5d2ff9dd0b: from storage DS-f4fc3c6c-5161-46cb-93ae-059b5f020c9d node DatanodeRegistration(127.0.0.1:32895, datanodeUuid=f9f70e51-565f-4cc4-9aa8-ecb1ccfe8fa1, infoPort=46009, infoSecurePort=0, ipcPort=45243, storageInfo=lv=-57;cid=testClusterID;nsid=187563944;c=1732381530223), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T17:05:33,180 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204 2024-11-23T17:05:33,272 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/zookeeper_0, clientPort=49631, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-23T17:05:33,282 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49631 2024-11-23T17:05:33,292 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
2024-11-23T17:05:33,295 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T17:05:33,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741825_1001 (size=7) 2024-11-23T17:05:33,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741825_1001 (size=7) 2024-11-23T17:05:33,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741825_1001 (size=7) 2024-11-23T17:05:33,633 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 with version=8 2024-11-23T17:05:33,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/hbase-staging 2024-11-23T17:05:33,799 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-23T17:05:34,099 INFO [Time-limited test {}] client.ConnectionUtils(128): master/1a7c5470b472:0 server-side Connection retries=45 2024-11-23T17:05:34,116 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T17:05:34,116 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T17:05:34,123 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T17:05:34,123 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T17:05:34,123 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T17:05:34,271 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-23T17:05:34,353 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-23T17:05:34,365 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-23T17:05:34,369 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T17:05:34,402 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 24560 (auto-detected) 2024-11-23T17:05:34,403 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-23T17:05:34,423 INFO [Time-limited test {}] 
ipc.NettyRpcServer(191): Bind to /172.17.0.2:46159 2024-11-23T17:05:34,448 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46159 connecting to ZooKeeper ensemble=127.0.0.1:49631 2024-11-23T17:05:34,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:461590x0, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T17:05:34,488 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46159-0x10145ca4ecf0000 connected 2024-11-23T17:05:34,530 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T17:05:34,535 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T17:05:34,550 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T17:05:34,556 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33, hbase.cluster.distributed=false 2024-11-23T17:05:34,606 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T17:05:34,610 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46159 2024-11-23T17:05:34,611 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46159 2024-11-23T17:05:34,611 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46159 2024-11-23T17:05:34,612 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46159 2024-11-23T17:05:34,612 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46159 2024-11-23T17:05:34,718 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1a7c5470b472:0 server-side Connection retries=45 2024-11-23T17:05:34,720 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T17:05:34,720 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T17:05:34,720 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T17:05:34,720 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T17:05:34,720 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T17:05:34,723 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-23T17:05:34,725 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T17:05:34,726 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37875 2024-11-23T17:05:34,728 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37875 connecting to ZooKeeper ensemble=127.0.0.1:49631 2024-11-23T17:05:34,730 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T17:05:34,734 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T17:05:34,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:378750x0, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T17:05:34,777 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37875-0x10145ca4ecf0001 connected 2024-11-23T17:05:34,777 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T17:05:34,782 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-23T17:05:34,792 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled 2024-11-23T17:05:34,794 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T17:05:34,800 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T17:05:34,804 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37875 2024-11-23T17:05:34,805 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37875 2024-11-23T17:05:34,805 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37875 2024-11-23T17:05:34,808 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37875 2024-11-23T17:05:34,809 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37875 2024-11-23T17:05:34,826 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1a7c5470b472:0 server-side Connection retries=45 2024-11-23T17:05:34,826 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T17:05:34,826 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T17:05:34,826 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T17:05:34,827 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T17:05:34,827 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T17:05:34,827 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-23T17:05:34,827 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T17:05:34,829 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43453 2024-11-23T17:05:34,830 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43453 connecting to ZooKeeper ensemble=127.0.0.1:49631 2024-11-23T17:05:34,831 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T17:05:34,834 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T17:05:34,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:434530x0, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T17:05:34,841 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:434530x0, quorum=127.0.0.1:49631, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T17:05:34,841 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43453-0x10145ca4ecf0002 connected 2024-11-23T17:05:34,841 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-23T17:05:34,844 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled 2024-11-23T17:05:34,846 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T17:05:34,848 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T17:05:34,849 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43453 
2024-11-23T17:05:34,850 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43453 2024-11-23T17:05:34,851 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43453 2024-11-23T17:05:34,852 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43453 2024-11-23T17:05:34,853 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43453 2024-11-23T17:05:34,872 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/1a7c5470b472:0 server-side Connection retries=45 2024-11-23T17:05:34,873 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T17:05:34,873 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T17:05:34,873 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T17:05:34,873 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T17:05:34,873 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T17:05:34,873 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-23T17:05:34,874 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T17:05:34,876 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44711 2024-11-23T17:05:34,878 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44711 connecting to ZooKeeper ensemble=127.0.0.1:49631 2024-11-23T17:05:34,879 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T17:05:34,881 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T17:05:34,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:447110x0, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T17:05:34,889 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:447110x0, quorum=127.0.0.1:49631, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T17:05:34,889 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): 
regionserver:44711-0x10145ca4ecf0003 connected 2024-11-23T17:05:34,890 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-23T17:05:34,896 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled 2024-11-23T17:05:34,897 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T17:05:34,900 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T17:05:34,905 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44711 2024-11-23T17:05:34,906 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44711 2024-11-23T17:05:34,907 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44711 2024-11-23T17:05:34,912 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44711 2024-11-23T17:05:34,913 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44711 2024-11-23T17:05:34,939 DEBUG [M:0;1a7c5470b472:46159 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;1a7c5470b472:46159 2024-11-23T17:05:34,940 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/1a7c5470b472,46159,1732381533870 2024-11-23T17:05:34,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T17:05:34,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T17:05:34,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T17:05:34,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T17:05:34,961 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/1a7c5470b472,46159,1732381533870 2024-11-23T17:05:34,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-23T17:05:34,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-23T17:05:34,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-23T17:05:34,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:05:34,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:05:34,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:05:34,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:05:35,003 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-23T17:05:35,004 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/1a7c5470b472,46159,1732381533870 from backup master directory 2024-11-23T17:05:35,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T17:05:35,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T17:05:35,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T17:05:35,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/1a7c5470b472,46159,1732381533870 2024-11-23T17:05:35,008 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T17:05:35,010 WARN [master/1a7c5470b472:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-23T17:05:35,010 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=1a7c5470b472,46159,1732381533870 2024-11-23T17:05:35,013 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-23T17:05:35,015 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-23T17:05:35,103 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/hbase.id] with ID: 67f7774a-a655-4a94-8a2f-94d7ca986550 2024-11-23T17:05:35,103 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.tmp/hbase.id 2024-11-23T17:05:35,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741826_1002 (size=42) 2024-11-23T17:05:35,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741826_1002 (size=42) 2024-11-23T17:05:35,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741826_1002 (size=42) 2024-11-23T17:05:35,131 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.tmp/hbase.id]:[hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/hbase.id] 2024-11-23T17:05:35,201 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T17:05:35,206 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-23T17:05:35,231 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 23ms. 
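The three FSUtils entries just above record the cluster ID being written to a temporary file under .tmp and then moved into place as hbase.id. A minimal sketch of that write-then-rename idiom follows, assuming a generic Hadoop FileSystem handle and an illustrative helper name; this is not the actual FSUtils implementation.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class ClusterIdWriterSketch {
  static void writeClusterId(FileSystem fs, Path rootDir, String clusterId) throws IOException {
    Path tmp = new Path(rootDir, ".tmp/hbase.id"); // temporary location, as in the log
    Path dst = new Path(rootDir, "hbase.id");      // final location
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(clusterId.getBytes(StandardCharsets.UTF_8));
    }
    // The rename makes the cluster ID visible only once the file is fully written.
    if (!fs.rename(tmp, dst)) {
      throw new IOException("rename " + tmp + " -> " + dst + " failed");
    }
  }
}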
2024-11-23T17:05:35,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:05:35,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:05:35,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:05:35,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:05:35,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741827_1003 (size=196) 2024-11-23T17:05:35,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741827_1003 (size=196) 2024-11-23T17:05:35,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741827_1003 (size=196) 2024-11-23T17:05:35,339 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T17:05:35,342 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-23T17:05:35,372 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: 
org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:262) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:231) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
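The NoSuchMethodException above is expected rather than a failure: the DEBUG entry records a reflection probe for the HDFS-12396 method DFSClient.decryptEncryptedDataEncryptionKey, with a fall-back to an older helper when it is absent. Below is a self-contained sketch of that probe pattern; the class and method names are taken from the log, but the surrounding class is illustrative and is not the actual FanOutOneBlockAsyncDFSOutputSaslHelper code.

import java.lang.reflect.Method;

public final class Hdfs12396ProbeSketch {
  /** Returns true if this Hadoop version exposes DFSClient.decryptEncryptedDataEncryptionKey. */
  static boolean hasHdfs12396Helper() {
    try {
      Class<?> dfsClient = Class.forName("org.apache.hadoop.hdfs.DFSClient");
      Class<?> feInfo = Class.forName("org.apache.hadoop.fs.FileEncryptionInfo");
      Method m = dfsClient.getDeclaredMethod("decryptEncryptedDataEncryptionKey", feInfo);
      return m != null;
    } catch (ClassNotFoundException | NoSuchMethodException e) {
      // Same outcome the DEBUG entry records: fall back to the pre-HDFS-12396 code path.
      return false;
    }
  }

  public static void main(String[] args) {
    System.out.println("HDFS-12396 helper present: " + hasHdfs12396Helper());
  }
}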
2024-11-23T17:05:35,380 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-23T17:05:35,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741828_1004 (size=1189) 2024-11-23T17:05:35,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741828_1004 (size=1189) 2024-11-23T17:05:35,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741828_1004 (size=1189) 2024-11-23T17:05:35,488 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/MasterData/data/master/store 2024-11-23T17:05:35,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741829_1005 (size=34) 2024-11-23T17:05:35,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741829_1005 (size=34) 2024-11-23T17:05:35,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741829_1005 (size=34) 2024-11-23T17:05:35,557 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-11-23T17:05:35,568 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:05:35,570 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T17:05:35,570 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T17:05:35,570 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T17:05:35,573 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T17:05:35,573 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T17:05:35,573 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T17:05:35,575 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732381535570Disabling compacts and flushes for region at 1732381535570Disabling writes for close at 1732381535573 (+3 ms)Writing region close event to WAL at 1732381535573Closed at 1732381535573 2024-11-23T17:05:35,578 WARN [master/1a7c5470b472:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/MasterData/data/master/store/.initializing 2024-11-23T17:05:35,578 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/MasterData/WALs/1a7c5470b472,46159,1732381533870 2024-11-23T17:05:35,587 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-23T17:05:35,612 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a7c5470b472%2C46159%2C1732381533870, suffix=, logDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/MasterData/WALs/1a7c5470b472,46159,1732381533870, archiveDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/MasterData/oldWALs, maxLogs=10 2024-11-23T17:05:35,644 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/MasterData/WALs/1a7c5470b472,46159,1732381533870/1a7c5470b472%2C46159%2C1732381533870.1732381535619, exclude list is [], retry=0 2024-11-23T17:05:35,679 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38951,DS-162d84dd-1d36-456f-980d-af608c2736b0,DISK] 
2024-11-23T17:05:35,679 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44705,DS-060ddb2c-4779-4744-a2e0-1bc04f2e067d,DISK] 2024-11-23T17:05:35,679 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32895,DS-7c68cd94-8a4b-4f5f-8d5b-d907b4cb7b32,DISK] 2024-11-23T17:05:35,683 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-23T17:05:35,737 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/MasterData/WALs/1a7c5470b472,46159,1732381533870/1a7c5470b472%2C46159%2C1732381533870.1732381535619 2024-11-23T17:05:35,738 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35159:35159),(127.0.0.1/127.0.0.1:46009:46009),(127.0.0.1/127.0.0.1:40223:40223)] 2024-11-23T17:05:35,739 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-23T17:05:35,739 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:05:35,743 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T17:05:35,744 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T17:05:35,799 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T17:05:35,840 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-23T17:05:35,845 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:05:35,849 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T17:05:35,850 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T17:05:35,853 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-23T17:05:35,854 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:05:35,855 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:05:35,855 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T17:05:35,858 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-23T17:05:35,858 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:05:35,859 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 
{}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:05:35,859 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T17:05:35,862 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-23T17:05:35,862 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:05:35,863 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:05:35,863 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T17:05:35,867 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T17:05:35,868 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T17:05:35,875 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T17:05:35,875 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T17:05:35,879 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-23T17:05:35,883 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T17:05:35,890 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:05:35,891 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58775807, jitterRate=-0.12417222559452057}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-23T17:05:35,897 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732381535762Initializing all the Stores at 1732381535764 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732381535765 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381535767 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381535767Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381535767Cleaning up temporary data from old regions at 1732381535875 (+108 ms)Region opened successfully at 1732381535897 (+22 ms) 2024-11-23T17:05:35,899 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-23T17:05:35,964 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b987190, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1a7c5470b472/172.17.0.2:0 2024-11-23T17:05:36,004 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
2024-11-23T17:05:36,021 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-23T17:05:36,022 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-23T17:05:36,026 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-23T17:05:36,028 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-23T17:05:36,035 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 7 msec 2024-11-23T17:05:36,035 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-23T17:05:36,077 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-23T17:05:36,089 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-23T17:05:36,091 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-23T17:05:36,093 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-23T17:05:36,095 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-23T17:05:36,098 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-23T17:05:36,100 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-23T17:05:36,105 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-23T17:05:36,107 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-23T17:05:36,108 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-23T17:05:36,110 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-23T17:05:36,127 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-23T17:05:36,129 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-23T17:05:36,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T17:05:36,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T17:05:36,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T17:05:36,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:05:36,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:05:36,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T17:05:36,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:05:36,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:05:36,141 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=1a7c5470b472,46159,1732381533870, sessionid=0x10145ca4ecf0000, setting cluster-up flag (Was=false) 2024-11-23T17:05:36,156 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:05:36,156 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:05:36,156 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:05:36,156 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-23T17:05:36,166 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-23T17:05:36,168 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1a7c5470b472,46159,1732381533870 2024-11-23T17:05:36,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:05:36,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:05:36,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:05:36,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:05:36,182 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-23T17:05:36,184 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1a7c5470b472,46159,1732381533870 2024-11-23T17:05:36,190 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-23T17:05:36,219 INFO [RS:0;1a7c5470b472:37875 {}] regionserver.HRegionServer(746): ClusterId : 67f7774a-a655-4a94-8a2f-94d7ca986550 2024-11-23T17:05:36,219 INFO [RS:1;1a7c5470b472:43453 {}] regionserver.HRegionServer(746): ClusterId : 67f7774a-a655-4a94-8a2f-94d7ca986550 2024-11-23T17:05:36,219 INFO [RS:2;1a7c5470b472:44711 {}] regionserver.HRegionServer(746): ClusterId : 67f7774a-a655-4a94-8a2f-94d7ca986550 2024-11-23T17:05:36,223 DEBUG [RS:2;1a7c5470b472:44711 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-23T17:05:36,223 DEBUG [RS:0;1a7c5470b472:37875 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-23T17:05:36,223 DEBUG [RS:1;1a7c5470b472:43453 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-23T17:05:36,225 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] master.HMaster(3441): Registered master coprocessor service: service=AccessControlService 2024-11-23T17:05:36,230 DEBUG [RS:2;1a7c5470b472:44711 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-23T17:05:36,230 DEBUG [RS:0;1a7c5470b472:37875 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-23T17:05:36,230 DEBUG [RS:1;1a7c5470b472:43453 {}] 
procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-23T17:05:36,230 DEBUG [RS:2;1a7c5470b472:44711 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-23T17:05:36,230 DEBUG [RS:0;1a7c5470b472:37875 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-23T17:05:36,230 DEBUG [RS:1;1a7c5470b472:43453 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-23T17:05:36,230 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:05:36,231 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 2024-11-23T17:05:36,235 DEBUG [RS:2;1a7c5470b472:44711 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-23T17:05:36,235 DEBUG [RS:0;1a7c5470b472:37875 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-23T17:05:36,235 DEBUG [RS:1;1a7c5470b472:43453 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-23T17:05:36,236 DEBUG [RS:2;1a7c5470b472:44711 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1aef69ac, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1a7c5470b472/172.17.0.2:0 2024-11-23T17:05:36,236 DEBUG [RS:0;1a7c5470b472:37875 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@290c1836, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1a7c5470b472/172.17.0.2:0 2024-11-23T17:05:36,237 DEBUG [RS:1;1a7c5470b472:43453 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e664b55, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1a7c5470b472/172.17.0.2:0 2024-11-23T17:05:36,252 DEBUG [RS:1;1a7c5470b472:43453 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;1a7c5470b472:43453 2024-11-23T17:05:36,256 INFO [RS:1;1a7c5470b472:43453 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-23T17:05:36,256 INFO [RS:1;1a7c5470b472:43453 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-23T17:05:36,256 DEBUG [RS:1;1a7c5470b472:43453 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-23T17:05:36,257 INFO [RS:1;1a7c5470b472:43453 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:05:36,257 DEBUG [RS:1;1a7c5470b472:43453 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-23T17:05:36,257 DEBUG [RS:2;1a7c5470b472:44711 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;1a7c5470b472:44711 2024-11-23T17:05:36,257 INFO [RS:2;1a7c5470b472:44711 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-23T17:05:36,257 INFO [RS:2;1a7c5470b472:44711 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-23T17:05:36,257 DEBUG [RS:0;1a7c5470b472:37875 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;1a7c5470b472:37875 2024-11-23T17:05:36,258 DEBUG [RS:2;1a7c5470b472:44711 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-23T17:05:36,258 INFO [RS:0;1a7c5470b472:37875 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-23T17:05:36,258 INFO [RS:0;1a7c5470b472:37875 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-23T17:05:36,258 INFO [RS:2;1a7c5470b472:44711 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:05:36,258 DEBUG [RS:0;1a7c5470b472:37875 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-23T17:05:36,258 DEBUG [RS:2;1a7c5470b472:44711 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-23T17:05:36,259 INFO [RS:0;1a7c5470b472:37875 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:05:36,259 DEBUG [RS:0;1a7c5470b472:37875 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-23T17:05:36,260 INFO [RS:1;1a7c5470b472:43453 {}] regionserver.HRegionServer(2659): reportForDuty to master=1a7c5470b472,46159,1732381533870 with port=43453, startcode=1732381534825 2024-11-23T17:05:36,260 INFO [RS:0;1a7c5470b472:37875 {}] regionserver.HRegionServer(2659): reportForDuty to master=1a7c5470b472,46159,1732381533870 with port=37875, startcode=1732381534675 2024-11-23T17:05:36,260 INFO [RS:2;1a7c5470b472:44711 {}] regionserver.HRegionServer(2659): reportForDuty to master=1a7c5470b472,46159,1732381533870 with port=44711, startcode=1732381534872 2024-11-23T17:05:36,276 DEBUG [RS:2;1a7c5470b472:44711 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T17:05:36,276 DEBUG [RS:1;1a7c5470b472:43453 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T17:05:36,276 DEBUG [RS:0;1a7c5470b472:37875 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T17:05:36,315 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-23T17:05:36,321 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38185, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T17:05:36,322 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57097, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T17:05:36,322 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49551, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T17:05:36,329 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-23T17:05:36,329 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46159 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-23T17:05:36,336 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46159 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-23T17:05:36,337 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46159 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-23T17:05:36,339 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-23T17:05:36,346 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 1a7c5470b472,46159,1732381533870 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-23T17:05:36,359 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/1a7c5470b472:0, corePoolSize=5, maxPoolSize=5 2024-11-23T17:05:36,359 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/1a7c5470b472:0, corePoolSize=5, maxPoolSize=5 2024-11-23T17:05:36,360 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/1a7c5470b472:0, corePoolSize=5, maxPoolSize=5 2024-11-23T17:05:36,360 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/1a7c5470b472:0, corePoolSize=5, maxPoolSize=5 2024-11-23T17:05:36,360 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/1a7c5470b472:0, corePoolSize=10, maxPoolSize=10 2024-11-23T17:05:36,360 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,360 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/1a7c5470b472:0, corePoolSize=2, maxPoolSize=2 2024-11-23T17:05:36,360 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,366 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732381566366 2024-11-23T17:05:36,367 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T17:05:36,367 DEBUG [RS:0;1a7c5470b472:37875 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-23T17:05:36,367 DEBUG [RS:1;1a7c5470b472:43453 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-23T17:05:36,367 DEBUG [RS:2;1a7c5470b472:44711 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-23T17:05:36,367 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-23T17:05:36,368 WARN [RS:0;1a7c5470b472:37875 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-23T17:05:36,368 WARN [RS:2;1a7c5470b472:44711 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-23T17:05:36,368 WARN [RS:1;1a7c5470b472:43453 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 
2024-11-23T17:05:36,368 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-23T17:05:36,369 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-23T17:05:36,373 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-23T17:05:36,373 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-23T17:05:36,376 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-23T17:05:36,376 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-23T17:05:36,379 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:05:36,379 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-23T17:05:36,384 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-23T17:05:36,389 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-23T17:05:36,392 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-23T17:05:36,392 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-23T17:05:36,402 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-23T17:05:36,402 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-23T17:05:36,408 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/1a7c5470b472:0:becomeActiveMaster-HFileCleaner.large.0-1732381536404,5,FailOnTimeoutGroup] 2024-11-23T17:05:36,412 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/1a7c5470b472:0:becomeActiveMaster-HFileCleaner.small.0-1732381536408,5,FailOnTimeoutGroup] 2024-11-23T17:05:36,413 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:36,413 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-23T17:05:36,414 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:36,415 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-23T17:05:36,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741831_1007 (size=1321) 2024-11-23T17:05:36,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741831_1007 (size=1321) 2024-11-23T17:05:36,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741831_1007 (size=1321) 2024-11-23T17:05:36,420 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-23T17:05:36,421 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:05:36,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741832_1008 (size=32) 2024-11-23T17:05:36,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741832_1008 (size=32) 2024-11-23T17:05:36,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741832_1008 (size=32) 2024-11-23T17:05:36,449 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:05:36,452 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T17:05:36,455 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T17:05:36,455 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:05:36,457 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T17:05:36,457 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T17:05:36,460 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T17:05:36,460 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:05:36,461 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T17:05:36,461 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T17:05:36,463 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min 
locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T17:05:36,463 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:05:36,464 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T17:05:36,464 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T17:05:36,467 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T17:05:36,467 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:05:36,467 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T17:05:36,468 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T17:05:36,469 INFO [RS:0;1a7c5470b472:37875 {}] regionserver.HRegionServer(2659): reportForDuty to master=1a7c5470b472,46159,1732381533870 with port=37875, startcode=1732381534675 2024-11-23T17:05:36,469 INFO [RS:1;1a7c5470b472:43453 {}] regionserver.HRegionServer(2659): reportForDuty to master=1a7c5470b472,46159,1732381533870 with port=43453, startcode=1732381534825 2024-11-23T17:05:36,470 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740 2024-11-23T17:05:36,470 INFO [RS:2;1a7c5470b472:44711 {}] regionserver.HRegionServer(2659): reportForDuty to master=1a7c5470b472,46159,1732381533870 with port=44711, startcode=1732381534872 2024-11-23T17:05:36,471 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46159 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1a7c5470b472,43453,1732381534825 
2024-11-23T17:05:36,472 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740 2024-11-23T17:05:36,474 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T17:05:36,474 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T17:05:36,475 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46159 {}] master.ServerManager(517): Registering regionserver=1a7c5470b472,43453,1732381534825 2024-11-23T17:05:36,475 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-23T17:05:36,477 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T17:05:36,481 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:05:36,482 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59341147, jitterRate=-0.11574800312519073}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-23T17:05:36,484 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732381536450Initializing all the Stores at 1732381536452 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732381536452Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732381536452Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381536452Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732381536452Cleaning up temporary data from old regions at 1732381536474 (+22 ms)Region opened successfully at 1732381536484 (+10 ms) 2024-11-23T17:05:36,485 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 
2024-11-23T17:05:36,485 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T17:05:36,485 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T17:05:36,485 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T17:05:36,485 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T17:05:36,486 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46159 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1a7c5470b472,37875,1732381534675 2024-11-23T17:05:36,486 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46159 {}] master.ServerManager(517): Registering regionserver=1a7c5470b472,37875,1732381534675 2024-11-23T17:05:36,486 DEBUG [RS:1;1a7c5470b472:43453 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:05:36,487 DEBUG [RS:1;1a7c5470b472:43453 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35431 2024-11-23T17:05:36,487 DEBUG [RS:1;1a7c5470b472:43453 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-23T17:05:36,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T17:05:36,492 DEBUG [RS:0;1a7c5470b472:37875 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:05:36,492 DEBUG [RS:0;1a7c5470b472:37875 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35431 2024-11-23T17:05:36,492 DEBUG [RS:0;1a7c5470b472:37875 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-23T17:05:36,493 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46159 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 1a7c5470b472,44711,1732381534872 2024-11-23T17:05:36,493 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46159 {}] master.ServerManager(517): Registering regionserver=1a7c5470b472,44711,1732381534872 2024-11-23T17:05:36,493 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T17:05:36,493 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732381536485Disabling compacts and flushes for region at 1732381536485Disabling writes for close at 1732381536485Writing region close event to WAL at 1732381536493 (+8 ms)Closed at 1732381536493 2024-11-23T17:05:36,493 DEBUG [RS:1;1a7c5470b472:43453 {}] zookeeper.ZKUtil(111): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1a7c5470b472,43453,1732381534825 2024-11-23T17:05:36,493 WARN [RS:1;1a7c5470b472:43453 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-23T17:05:36,494 INFO [RS:1;1a7c5470b472:43453 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-23T17:05:36,494 DEBUG [RS:1;1a7c5470b472:43453 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/WALs/1a7c5470b472,43453,1732381534825 2024-11-23T17:05:36,505 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T17:05:36,505 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-23T17:05:36,506 DEBUG [RS:2;1a7c5470b472:44711 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:05:36,506 DEBUG [RS:2;1a7c5470b472:44711 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35431 2024-11-23T17:05:36,506 DEBUG [RS:2;1a7c5470b472:44711 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-23T17:05:36,507 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1a7c5470b472,43453,1732381534825] 2024-11-23T17:05:36,507 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1a7c5470b472,37875,1732381534675] 2024-11-23T17:05:36,514 DEBUG [RS:0;1a7c5470b472:37875 {}] zookeeper.ZKUtil(111): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1a7c5470b472,37875,1732381534675 2024-11-23T17:05:36,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T17:05:36,514 DEBUG [RS:2;1a7c5470b472:44711 {}] zookeeper.ZKUtil(111): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1a7c5470b472,44711,1732381534872 2024-11-23T17:05:36,514 WARN [RS:2;1a7c5470b472:44711 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-23T17:05:36,514 INFO [RS:2;1a7c5470b472:44711 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-23T17:05:36,515 DEBUG [RS:2;1a7c5470b472:44711 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/WALs/1a7c5470b472,44711,1732381534872 2024-11-23T17:05:36,515 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1a7c5470b472,44711,1732381534872] 2024-11-23T17:05:36,516 WARN [RS:0;1a7c5470b472:37875 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-23T17:05:36,516 INFO [RS:0;1a7c5470b472:37875 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-23T17:05:36,516 DEBUG [RS:0;1a7c5470b472:37875 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/WALs/1a7c5470b472,37875,1732381534675 2024-11-23T17:05:36,522 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-23T17:05:36,533 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T17:05:36,544 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-23T17:05:36,551 INFO [RS:0;1a7c5470b472:37875 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-23T17:05:36,551 INFO [RS:1;1a7c5470b472:43453 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-23T17:05:36,552 INFO [RS:2;1a7c5470b472:44711 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-23T17:05:36,571 INFO [RS:0;1a7c5470b472:37875 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-23T17:05:36,572 INFO [RS:2;1a7c5470b472:44711 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-23T17:05:36,572 INFO [RS:1;1a7c5470b472:43453 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-23T17:05:36,579 INFO [RS:1;1a7c5470b472:43453 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T17:05:36,579 INFO [RS:2;1a7c5470b472:44711 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T17:05:36,579 INFO [RS:2;1a7c5470b472:44711 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:36,579 INFO [RS:1;1a7c5470b472:43453 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-23T17:05:36,585 INFO [RS:1;1a7c5470b472:43453 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-23T17:05:36,589 INFO [RS:0;1a7c5470b472:37875 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T17:05:36,589 INFO [RS:0;1a7c5470b472:37875 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:36,589 INFO [RS:2;1a7c5470b472:44711 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-23T17:05:36,591 INFO [RS:0;1a7c5470b472:37875 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-23T17:05:36,594 INFO [RS:0;1a7c5470b472:37875 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-23T17:05:36,594 INFO [RS:1;1a7c5470b472:43453 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-23T17:05:36,596 INFO [RS:1;1a7c5470b472:43453 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:36,596 INFO [RS:0;1a7c5470b472:37875 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:36,596 DEBUG [RS:1;1a7c5470b472:43453 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,596 DEBUG [RS:0;1a7c5470b472:37875 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,596 DEBUG [RS:1;1a7c5470b472:43453 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,596 DEBUG [RS:0;1a7c5470b472:37875 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,596 DEBUG [RS:0;1a7c5470b472:37875 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,597 DEBUG [RS:1;1a7c5470b472:43453 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,597 DEBUG [RS:1;1a7c5470b472:43453 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,597 DEBUG [RS:0;1a7c5470b472:37875 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,597 DEBUG [RS:1;1a7c5470b472:43453 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,597 DEBUG [RS:0;1a7c5470b472:37875 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,597 DEBUG [RS:1;1a7c5470b472:43453 {}] executor.ExecutorService(95): Starting executor 
service name=RS_LOG_REPLAY_OPS-regionserver/1a7c5470b472:0, corePoolSize=2, maxPoolSize=2 2024-11-23T17:05:36,597 DEBUG [RS:0;1a7c5470b472:37875 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1a7c5470b472:0, corePoolSize=2, maxPoolSize=2 2024-11-23T17:05:36,597 DEBUG [RS:1;1a7c5470b472:43453 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,597 DEBUG [RS:0;1a7c5470b472:37875 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,597 DEBUG [RS:1;1a7c5470b472:43453 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,597 DEBUG [RS:0;1a7c5470b472:37875 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,597 DEBUG [RS:0;1a7c5470b472:37875 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,597 DEBUG [RS:1;1a7c5470b472:43453 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,598 DEBUG [RS:0;1a7c5470b472:37875 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,598 DEBUG [RS:1;1a7c5470b472:43453 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,598 DEBUG [RS:0;1a7c5470b472:37875 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,598 DEBUG [RS:1;1a7c5470b472:43453 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,598 DEBUG [RS:0;1a7c5470b472:37875 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,598 DEBUG [RS:1;1a7c5470b472:43453 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,598 DEBUG [RS:0;1a7c5470b472:37875 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0, corePoolSize=3, maxPoolSize=3 2024-11-23T17:05:36,598 DEBUG [RS:1;1a7c5470b472:43453 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0, corePoolSize=3, maxPoolSize=3 2024-11-23T17:05:36,598 DEBUG [RS:0;1a7c5470b472:37875 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1a7c5470b472:0, corePoolSize=3, maxPoolSize=3 2024-11-23T17:05:36,598 DEBUG [RS:1;1a7c5470b472:43453 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1a7c5470b472:0, corePoolSize=3, maxPoolSize=3 
2024-11-23T17:05:36,611 INFO [RS:2;1a7c5470b472:44711 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-23T17:05:36,612 INFO [RS:2;1a7c5470b472:44711 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:36,612 DEBUG [RS:2;1a7c5470b472:44711 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,612 DEBUG [RS:2;1a7c5470b472:44711 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,612 DEBUG [RS:2;1a7c5470b472:44711 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,612 DEBUG [RS:2;1a7c5470b472:44711 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,613 DEBUG [RS:2;1a7c5470b472:44711 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,613 DEBUG [RS:2;1a7c5470b472:44711 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1a7c5470b472:0, corePoolSize=2, maxPoolSize=2 2024-11-23T17:05:36,613 DEBUG [RS:2;1a7c5470b472:44711 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,613 DEBUG [RS:2;1a7c5470b472:44711 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,613 DEBUG [RS:2;1a7c5470b472:44711 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,613 DEBUG [RS:2;1a7c5470b472:44711 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,613 DEBUG [RS:2;1a7c5470b472:44711 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,614 DEBUG [RS:2;1a7c5470b472:44711 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1a7c5470b472:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:05:36,614 DEBUG [RS:2;1a7c5470b472:44711 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0, corePoolSize=3, maxPoolSize=3 2024-11-23T17:05:36,614 DEBUG [RS:2;1a7c5470b472:44711 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1a7c5470b472:0, corePoolSize=3, maxPoolSize=3 2024-11-23T17:05:36,620 INFO [RS:0;1a7c5470b472:37875 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:36,621 INFO [RS:0;1a7c5470b472:37875 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 
2024-11-23T17:05:36,621 INFO [RS:0;1a7c5470b472:37875 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:36,621 INFO [RS:0;1a7c5470b472:37875 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:36,621 INFO [RS:0;1a7c5470b472:37875 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:36,621 INFO [RS:0;1a7c5470b472:37875 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a7c5470b472,37875,1732381534675-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T17:05:36,628 INFO [RS:2;1a7c5470b472:44711 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:36,629 INFO [RS:2;1a7c5470b472:44711 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:36,629 INFO [RS:2;1a7c5470b472:44711 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:36,629 INFO [RS:2;1a7c5470b472:44711 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:36,629 INFO [RS:2;1a7c5470b472:44711 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:36,629 INFO [RS:2;1a7c5470b472:44711 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a7c5470b472,44711,1732381534872-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T17:05:36,632 INFO [RS:1;1a7c5470b472:43453 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:36,632 INFO [RS:1;1a7c5470b472:43453 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:36,633 INFO [RS:1;1a7c5470b472:43453 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:36,633 INFO [RS:1;1a7c5470b472:43453 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:36,633 INFO [RS:1;1a7c5470b472:43453 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:36,633 INFO [RS:1;1a7c5470b472:43453 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a7c5470b472,43453,1732381534825-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T17:05:36,659 INFO [RS:2;1a7c5470b472:44711 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-23T17:05:36,662 INFO [RS:0;1a7c5470b472:37875 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-23T17:05:36,662 INFO [RS:0;1a7c5470b472:37875 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a7c5470b472,37875,1732381534675-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:36,662 INFO [RS:2;1a7c5470b472:44711 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a7c5470b472,44711,1732381534872-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 
2024-11-23T17:05:36,663 INFO [RS:2;1a7c5470b472:44711 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:36,663 INFO [RS:0;1a7c5470b472:37875 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:36,663 INFO [RS:2;1a7c5470b472:44711 {}] regionserver.Replication(171): 1a7c5470b472,44711,1732381534872 started 2024-11-23T17:05:36,663 INFO [RS:0;1a7c5470b472:37875 {}] regionserver.Replication(171): 1a7c5470b472,37875,1732381534675 started 2024-11-23T17:05:36,668 INFO [RS:1;1a7c5470b472:43453 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-23T17:05:36,668 INFO [RS:1;1a7c5470b472:43453 {}] hbase.ChoreService(168): Chore ScheduledChore name=1a7c5470b472,43453,1732381534825-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:36,668 INFO [RS:1;1a7c5470b472:43453 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:36,669 INFO [RS:1;1a7c5470b472:43453 {}] regionserver.Replication(171): 1a7c5470b472,43453,1732381534825 started 2024-11-23T17:05:36,690 INFO [RS:1;1a7c5470b472:43453 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:36,691 INFO [RS:1;1a7c5470b472:43453 {}] regionserver.HRegionServer(1482): Serving as 1a7c5470b472,43453,1732381534825, RpcServer on 1a7c5470b472/172.17.0.2:43453, sessionid=0x10145ca4ecf0002 2024-11-23T17:05:36,692 DEBUG [RS:1;1a7c5470b472:43453 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-23T17:05:36,692 DEBUG [RS:1;1a7c5470b472:43453 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1a7c5470b472,43453,1732381534825 2024-11-23T17:05:36,692 DEBUG [RS:1;1a7c5470b472:43453 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a7c5470b472,43453,1732381534825' 2024-11-23T17:05:36,693 DEBUG [RS:1;1a7c5470b472:43453 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-23T17:05:36,694 DEBUG [RS:1;1a7c5470b472:43453 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-23T17:05:36,695 DEBUG [RS:1;1a7c5470b472:43453 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-23T17:05:36,695 WARN [1a7c5470b472:46159 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-23T17:05:36,695 DEBUG [RS:1;1a7c5470b472:43453 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-23T17:05:36,695 DEBUG [RS:1;1a7c5470b472:43453 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1a7c5470b472,43453,1732381534825 2024-11-23T17:05:36,695 DEBUG [RS:1;1a7c5470b472:43453 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a7c5470b472,43453,1732381534825' 2024-11-23T17:05:36,695 DEBUG [RS:1;1a7c5470b472:43453 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-23T17:05:36,696 DEBUG [RS:1;1a7c5470b472:43453 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-23T17:05:36,696 DEBUG [RS:1;1a7c5470b472:43453 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-23T17:05:36,697 INFO [RS:1;1a7c5470b472:43453 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-23T17:05:36,697 INFO [RS:1;1a7c5470b472:43453 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-23T17:05:36,700 INFO [RS:0;1a7c5470b472:37875 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:36,700 INFO [RS:2;1a7c5470b472:44711 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:36,700 INFO [RS:0;1a7c5470b472:37875 {}] regionserver.HRegionServer(1482): Serving as 1a7c5470b472,37875,1732381534675, RpcServer on 1a7c5470b472/172.17.0.2:37875, sessionid=0x10145ca4ecf0001 2024-11-23T17:05:36,700 INFO [RS:2;1a7c5470b472:44711 {}] regionserver.HRegionServer(1482): Serving as 1a7c5470b472,44711,1732381534872, RpcServer on 1a7c5470b472/172.17.0.2:44711, sessionid=0x10145ca4ecf0003 2024-11-23T17:05:36,701 DEBUG [RS:2;1a7c5470b472:44711 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-23T17:05:36,701 DEBUG [RS:2;1a7c5470b472:44711 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1a7c5470b472,44711,1732381534872 2024-11-23T17:05:36,701 DEBUG [RS:2;1a7c5470b472:44711 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a7c5470b472,44711,1732381534872' 2024-11-23T17:05:36,701 DEBUG [RS:2;1a7c5470b472:44711 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-23T17:05:36,701 DEBUG [RS:0;1a7c5470b472:37875 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-23T17:05:36,702 DEBUG [RS:0;1a7c5470b472:37875 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1a7c5470b472,37875,1732381534675 2024-11-23T17:05:36,702 DEBUG [RS:0;1a7c5470b472:37875 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a7c5470b472,37875,1732381534675' 2024-11-23T17:05:36,702 DEBUG [RS:0;1a7c5470b472:37875 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-23T17:05:36,702 DEBUG [RS:2;1a7c5470b472:44711 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-23T17:05:36,703 DEBUG [RS:0;1a7c5470b472:37875 {}] 
procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-23T17:05:36,703 DEBUG [RS:2;1a7c5470b472:44711 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-23T17:05:36,703 DEBUG [RS:2;1a7c5470b472:44711 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-23T17:05:36,703 DEBUG [RS:2;1a7c5470b472:44711 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1a7c5470b472,44711,1732381534872 2024-11-23T17:05:36,703 DEBUG [RS:2;1a7c5470b472:44711 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a7c5470b472,44711,1732381534872' 2024-11-23T17:05:36,703 DEBUG [RS:2;1a7c5470b472:44711 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-23T17:05:36,704 DEBUG [RS:2;1a7c5470b472:44711 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-23T17:05:36,705 DEBUG [RS:2;1a7c5470b472:44711 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-23T17:05:36,705 INFO [RS:2;1a7c5470b472:44711 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-23T17:05:36,705 INFO [RS:2;1a7c5470b472:44711 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-23T17:05:36,708 DEBUG [RS:0;1a7c5470b472:37875 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-23T17:05:36,708 DEBUG [RS:0;1a7c5470b472:37875 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-23T17:05:36,708 DEBUG [RS:0;1a7c5470b472:37875 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1a7c5470b472,37875,1732381534675 2024-11-23T17:05:36,708 DEBUG [RS:0;1a7c5470b472:37875 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1a7c5470b472,37875,1732381534675' 2024-11-23T17:05:36,708 DEBUG [RS:0;1a7c5470b472:37875 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-23T17:05:36,709 DEBUG [RS:0;1a7c5470b472:37875 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-23T17:05:36,712 DEBUG [RS:0;1a7c5470b472:37875 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-23T17:05:36,712 INFO [RS:0;1a7c5470b472:37875 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-23T17:05:36,712 INFO [RS:0;1a7c5470b472:37875 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-23T17:05:36,804 INFO [RS:1;1a7c5470b472:43453 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-23T17:05:36,806 INFO [RS:2;1a7c5470b472:44711 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-23T17:05:36,808 INFO [RS:1;1a7c5470b472:43453 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a7c5470b472%2C43453%2C1732381534825, suffix=, logDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/WALs/1a7c5470b472,43453,1732381534825, archiveDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/oldWALs, maxLogs=32 2024-11-23T17:05:36,810 INFO [RS:2;1a7c5470b472:44711 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a7c5470b472%2C44711%2C1732381534872, suffix=, logDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/WALs/1a7c5470b472,44711,1732381534872, archiveDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/oldWALs, maxLogs=32 2024-11-23T17:05:36,813 INFO [RS:0;1a7c5470b472:37875 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-23T17:05:36,826 INFO [RS:0;1a7c5470b472:37875 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a7c5470b472%2C37875%2C1732381534675, suffix=, logDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/WALs/1a7c5470b472,37875,1732381534675, archiveDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/oldWALs, maxLogs=32 2024-11-23T17:05:36,830 DEBUG [RS:2;1a7c5470b472:44711 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/WALs/1a7c5470b472,44711,1732381534872/1a7c5470b472%2C44711%2C1732381534872.1732381536812, exclude list is [], retry=0 2024-11-23T17:05:36,833 DEBUG [RS:1;1a7c5470b472:43453 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/WALs/1a7c5470b472,43453,1732381534825/1a7c5470b472%2C43453%2C1732381534825.1732381536812, exclude list is [], retry=0 2024-11-23T17:05:36,837 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38951,DS-162d84dd-1d36-456f-980d-af608c2736b0,DISK] 2024-11-23T17:05:36,837 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32895,DS-7c68cd94-8a4b-4f5f-8d5b-d907b4cb7b32,DISK] 2024-11-23T17:05:36,837 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44705,DS-060ddb2c-4779-4744-a2e0-1bc04f2e067d,DISK] 2024-11-23T17:05:36,840 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:44705,DS-060ddb2c-4779-4744-a2e0-1bc04f2e067d,DISK] 2024-11-23T17:05:36,840 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38951,DS-162d84dd-1d36-456f-980d-af608c2736b0,DISK] 2024-11-23T17:05:36,840 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32895,DS-7c68cd94-8a4b-4f5f-8d5b-d907b4cb7b32,DISK] 2024-11-23T17:05:36,868 DEBUG [RS:0;1a7c5470b472:37875 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/WALs/1a7c5470b472,37875,1732381534675/1a7c5470b472%2C37875%2C1732381534675.1732381536828, exclude list is [], retry=0 2024-11-23T17:05:36,871 INFO [RS:1;1a7c5470b472:43453 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/WALs/1a7c5470b472,43453,1732381534825/1a7c5470b472%2C43453%2C1732381534825.1732381536812 2024-11-23T17:05:36,871 INFO [RS:2;1a7c5470b472:44711 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/WALs/1a7c5470b472,44711,1732381534872/1a7c5470b472%2C44711%2C1732381534872.1732381536812 2024-11-23T17:05:36,872 DEBUG [RS:1;1a7c5470b472:43453 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35159:35159),(127.0.0.1/127.0.0.1:46009:46009),(127.0.0.1/127.0.0.1:40223:40223)] 2024-11-23T17:05:36,872 DEBUG [RS:2;1a7c5470b472:44711 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35159:35159),(127.0.0.1/127.0.0.1:46009:46009),(127.0.0.1/127.0.0.1:40223:40223)] 2024-11-23T17:05:36,875 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44705,DS-060ddb2c-4779-4744-a2e0-1bc04f2e067d,DISK] 2024-11-23T17:05:36,876 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32895,DS-7c68cd94-8a4b-4f5f-8d5b-d907b4cb7b32,DISK] 2024-11-23T17:05:36,876 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38951,DS-162d84dd-1d36-456f-980d-af608c2736b0,DISK] 2024-11-23T17:05:36,889 INFO [RS:0;1a7c5470b472:37875 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/WALs/1a7c5470b472,37875,1732381534675/1a7c5470b472%2C37875%2C1732381534675.1732381536828 2024-11-23T17:05:36,890 DEBUG [RS:0;1a7c5470b472:37875 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35159:35159),(127.0.0.1/127.0.0.1:46009:46009),(127.0.0.1/127.0.0.1:40223:40223)] 2024-11-23T17:05:36,948 DEBUG [1a7c5470b472:46159 {}] assignment.AssignmentManager(2472): Processing 
assignQueue; systemServersCount=3, allServersCount=3 2024-11-23T17:05:36,957 DEBUG [1a7c5470b472:46159 {}] balancer.BalancerClusterState(204): Hosts are {1a7c5470b472=0} racks are {/default-rack=0} 2024-11-23T17:05:36,964 DEBUG [1a7c5470b472:46159 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T17:05:36,964 DEBUG [1a7c5470b472:46159 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T17:05:36,964 DEBUG [1a7c5470b472:46159 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T17:05:36,964 DEBUG [1a7c5470b472:46159 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T17:05:36,964 DEBUG [1a7c5470b472:46159 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T17:05:36,964 DEBUG [1a7c5470b472:46159 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T17:05:36,964 INFO [1a7c5470b472:46159 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T17:05:36,965 INFO [1a7c5470b472:46159 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T17:05:36,965 INFO [1a7c5470b472:46159 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T17:05:36,965 DEBUG [1a7c5470b472:46159 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T17:05:36,973 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:05:36,985 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1a7c5470b472,37875,1732381534675, state=OPENING 2024-11-23T17:05:36,991 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-23T17:05:36,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:05:36,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:05:36,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:05:36,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:05:36,995 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T17:05:36,995 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T17:05:36,995 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T17:05:36,995 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T17:05:36,997 DEBUG 
[PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T17:05:36,999 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=1a7c5470b472,37875,1732381534675}] 2024-11-23T17:05:37,193 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T17:05:37,196 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39077, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T17:05:37,222 INFO [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-23T17:05:37,222 INFO [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-23T17:05:37,223 INFO [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-23T17:05:37,227 INFO [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a7c5470b472%2C37875%2C1732381534675.meta, suffix=.meta, logDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/WALs/1a7c5470b472,37875,1732381534675, archiveDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/oldWALs, maxLogs=32 2024-11-23T17:05:37,253 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/WALs/1a7c5470b472,37875,1732381534675/1a7c5470b472%2C37875%2C1732381534675.meta.1732381537230.meta, exclude list is [], retry=0 2024-11-23T17:05:37,259 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38951,DS-162d84dd-1d36-456f-980d-af608c2736b0,DISK] 2024-11-23T17:05:37,259 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44705,DS-060ddb2c-4779-4744-a2e0-1bc04f2e067d,DISK] 2024-11-23T17:05:37,260 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32895,DS-7c68cd94-8a4b-4f5f-8d5b-d907b4cb7b32,DISK] 2024-11-23T17:05:37,274 INFO [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/WALs/1a7c5470b472,37875,1732381534675/1a7c5470b472%2C37875%2C1732381534675.meta.1732381537230.meta 2024-11-23T17:05:37,276 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40223:40223),(127.0.0.1/127.0.0.1:46009:46009),(127.0.0.1/127.0.0.1:35159:35159)] 2024-11-23T17:05:37,277 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-23T17:05:37,278 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-11-23T17:05:37,279 INFO [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:05:37,281 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-23T17:05:37,283 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-23T17:05:37,285 INFO [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-23T17:05:37,294 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-23T17:05:37,295 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:05:37,295 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-23T17:05:37,295 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-23T17:05:37,298 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T17:05:37,300 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T17:05:37,300 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:05:37,301 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T17:05:37,301 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T17:05:37,303 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T17:05:37,303 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:05:37,304 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T17:05:37,304 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T17:05:37,305 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T17:05:37,306 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:05:37,306 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T17:05:37,307 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T17:05:37,308 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T17:05:37,308 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:05:37,309 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-23T17:05:37,309 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T17:05:37,311 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740 2024-11-23T17:05:37,315 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740 2024-11-23T17:05:37,322 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T17:05:37,323 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T17:05:37,324 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-23T17:05:37,327 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T17:05:37,330 INFO [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61990573, jitterRate=-0.07626847922801971}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-23T17:05:37,331 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-23T17:05:37,336 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732381537296Writing region info on filesystem at 1732381537296Initializing all the Stores at 1732381537298 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732381537298Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732381537298Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381537298Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732381537298Cleaning up temporary data from old regions at 1732381537323 (+25 ms)Running coprocessor post-open hooks at 1732381537331 (+8 ms)Region opened successfully at 1732381537336 (+5 ms) 2024-11-23T17:05:37,346 INFO [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732381537180 2024-11-23T17:05:37,365 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-23T17:05:37,366 INFO [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-23T17:05:37,367 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:05:37,370 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1a7c5470b472,37875,1732381534675, state=OPEN 2024-11-23T17:05:37,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T17:05:37,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T17:05:37,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T17:05:37,373 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T17:05:37,373 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T17:05:37,373 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T17:05:37,373 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=1a7c5470b472,37875,1732381534675 2024-11-23T17:05:37,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T17:05:37,374 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T17:05:37,382 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-23T17:05:37,382 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=1a7c5470b472,37875,1732381534675 in 374 msec 2024-11-23T17:05:37,398 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-23T17:05:37,398 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 863 msec 2024-11-23T17:05:37,403 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T17:05:37,403 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-23T17:05:37,432 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:05:37,433 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:05:37,466 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:05:37,470 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57863, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:05:37,507 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.2640 sec 2024-11-23T17:05:37,508 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732381537508, completionTime=-1 2024-11-23T17:05:37,511 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-23T17:05:37,512 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-11-23T17:05:37,564 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-11-23T17:05:37,564 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732381597564 2024-11-23T17:05:37,564 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732381657564 2024-11-23T17:05:37,565 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 52 msec 2024-11-23T17:05:37,566 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-23T17:05:37,574 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a7c5470b472,46159,1732381533870-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:37,574 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a7c5470b472,46159,1732381533870-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:37,575 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a7c5470b472,46159,1732381533870-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:37,577 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-1a7c5470b472:46159, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:37,578 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:37,579 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:37,585 DEBUG [master/1a7c5470b472:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-23T17:05:37,609 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.599sec 2024-11-23T17:05:37,611 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-23T17:05:37,612 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-23T17:05:37,613 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-23T17:05:37,614 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-23T17:05:37,614 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-23T17:05:37,615 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a7c5470b472,46159,1732381533870-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T17:05:37,615 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a7c5470b472,46159,1732381533870-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-23T17:05:37,631 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58213e25, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:05:37,636 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-23T17:05:37,636 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-23T17:05:37,641 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:05:37,644 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:05:37,653 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-23T17:05:37,654 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] client.AsyncConnectionImpl(321): The fetched master address is 1a7c5470b472,46159,1732381533870 2024-11-23T17:05:37,655 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:05:37,658 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:05:37,658 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:05:37,658 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@10c991b4 2024-11-23T17:05:37,658 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a43589e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:05:37,658 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:05:37,659 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T17:05:37,661 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59303, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T17:05:37,663 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, 
sasl=false 2024-11-23T17:05:37,706 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:05:37,707 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35544, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:05:37,707 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46159 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-23T17:05:37,717 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69f7514f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:05:37,717 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:05:37,721 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:acl 2024-11-23T17:05:37,724 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T17:05:37,725 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:05:37,728 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T17:05:37,729 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46159 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 4 2024-11-23T17:05:37,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T17:05:37,744 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:05:37,744 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:05:37,760 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58404, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:05:37,762 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=1a7c5470b472,46159,1732381533870 2024-11-23T17:05:37,762 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2305): Starting mini mapreduce cluster... 
2024-11-23T17:05:37,763 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/test.cache.data in system properties and HBase conf 2024-11-23T17:05:37,763 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop.tmp.dir in system properties and HBase conf 2024-11-23T17:05:37,763 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop.log.dir in system properties and HBase conf 2024-11-23T17:05:37,763 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-23T17:05:37,763 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-23T17:05:37,763 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-23T17:05:37,763 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-23T17:05:37,763 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-23T17:05:37,763 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-23T17:05:37,763 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T17:05:37,763 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-23T17:05:37,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-23T17:05:37,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T17:05:37,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T17:05:37,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-23T17:05:37,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/nfs.dump.dir in system properties and HBase conf 2024-11-23T17:05:37,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/java.io.tmpdir in system properties and HBase conf 2024-11-23T17:05:37,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T17:05:37,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-23T17:05:37,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-23T17:05:37,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741837_1013 (size=349) 2024-11-23T17:05:37,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741837_1013 (size=349) 2024-11-23T17:05:37,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741837_1013 (size=349) 2024-11-23T17:05:37,820 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 64dcd7fd9fc755d9ffeb5ce90b3151ce, NAME => 'hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce.', STARTKEY => '', 
ENDKEY => ''}, tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:05:37,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T17:05:37,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741838_1014 (size=592039) 2024-11-23T17:05:37,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741838_1014 (size=592039) 2024-11-23T17:05:37,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741838_1014 (size=592039) 2024-11-23T17:05:37,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741839_1015 (size=36) 2024-11-23T17:05:37,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741839_1015 (size=36) 2024-11-23T17:05:37,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741839_1015 (size=36) 2024-11-23T17:05:37,873 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:05:37,874 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1722): Closing 64dcd7fd9fc755d9ffeb5ce90b3151ce, disabling compactions & flushes 2024-11-23T17:05:37,874 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce. 2024-11-23T17:05:37,874 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce. 2024-11-23T17:05:37,874 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce. after waiting 0 ms 2024-11-23T17:05:37,874 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce. 2024-11-23T17:05:37,874 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1973): Closed hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce. 
2024-11-23T17:05:37,874 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 64dcd7fd9fc755d9ffeb5ce90b3151ce: Waiting for close lock at 1732381537874Disabling compacts and flushes for region at 1732381537874Disabling writes for close at 1732381537874Writing region close event to WAL at 1732381537874Closed at 1732381537874 2024-11-23T17:05:37,878 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T17:05:37,884 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1732381537879"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732381537879"}]},"ts":"1732381537879"} 2024-11-23T17:05:37,894 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-23T17:05:37,896 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T17:05:37,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741840_1016 (size=1663647) 2024-11-23T17:05:37,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741840_1016 (size=1663647) 2024-11-23T17:05:37,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741840_1016 (size=1663647) 2024-11-23T17:05:37,900 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381537896"}]},"ts":"1732381537896"} 2024-11-23T17:05:37,905 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLING in hbase:meta 2024-11-23T17:05:37,906 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {1a7c5470b472=0} racks are {/default-rack=0} 2024-11-23T17:05:37,907 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T17:05:37,907 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T17:05:37,907 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T17:05:37,907 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T17:05:37,907 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T17:05:37,907 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T17:05:37,907 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T17:05:37,908 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T17:05:37,908 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T17:05:37,908 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T17:05:37,909 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=64dcd7fd9fc755d9ffeb5ce90b3151ce, ASSIGN}] 2024-11-23T17:05:37,912 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=64dcd7fd9fc755d9ffeb5ce90b3151ce, ASSIGN 2024-11-23T17:05:37,916 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=64dcd7fd9fc755d9ffeb5ce90b3151ce, ASSIGN; state=OFFLINE, location=1a7c5470b472,37875,1732381534675; forceNewPlan=false, retain=false 2024-11-23T17:05:38,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T17:05:38,071 INFO [1a7c5470b472:46159 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-23T17:05:38,072 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=64dcd7fd9fc755d9ffeb5ce90b3151ce, regionState=OPENING, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:05:38,077 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=64dcd7fd9fc755d9ffeb5ce90b3151ce, ASSIGN because future has completed 2024-11-23T17:05:38,078 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 64dcd7fd9fc755d9ffeb5ce90b3151ce, server=1a7c5470b472,37875,1732381534675}] 2024-11-23T17:05:38,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T17:05:38,544 INFO [RS_OPEN_PRIORITY_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(132): Open hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce. 2024-11-23T17:05:38,545 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 64dcd7fd9fc755d9ffeb5ce90b3151ce, NAME => 'hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce.', STARTKEY => '', ENDKEY => ''} 2024-11-23T17:05:38,545 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce. service=AccessControlService 2024-11-23T17:05:38,548 INFO [RS_OPEN_PRIORITY_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T17:05:38,549 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl 64dcd7fd9fc755d9ffeb5ce90b3151ce 2024-11-23T17:05:38,549 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(898): Instantiated hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:05:38,549 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 64dcd7fd9fc755d9ffeb5ce90b3151ce 2024-11-23T17:05:38,549 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 64dcd7fd9fc755d9ffeb5ce90b3151ce 2024-11-23T17:05:38,561 INFO [StoreOpener-64dcd7fd9fc755d9ffeb5ce90b3151ce-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region 64dcd7fd9fc755d9ffeb5ce90b3151ce 2024-11-23T17:05:38,564 INFO [StoreOpener-64dcd7fd9fc755d9ffeb5ce90b3151ce-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 64dcd7fd9fc755d9ffeb5ce90b3151ce columnFamilyName l 2024-11-23T17:05:38,564 DEBUG [StoreOpener-64dcd7fd9fc755d9ffeb5ce90b3151ce-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:05:38,567 INFO [StoreOpener-64dcd7fd9fc755d9ffeb5ce90b3151ce-1 {}] regionserver.HStore(327): Store=64dcd7fd9fc755d9ffeb5ce90b3151ce/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:05:38,568 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 64dcd7fd9fc755d9ffeb5ce90b3151ce 2024-11-23T17:05:38,569 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/acl/64dcd7fd9fc755d9ffeb5ce90b3151ce 2024-11-23T17:05:38,570 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/acl/64dcd7fd9fc755d9ffeb5ce90b3151ce 2024-11-23T17:05:38,571 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 64dcd7fd9fc755d9ffeb5ce90b3151ce 2024-11-23T17:05:38,571 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 64dcd7fd9fc755d9ffeb5ce90b3151ce 2024-11-23T17:05:38,578 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 64dcd7fd9fc755d9ffeb5ce90b3151ce 2024-11-23T17:05:38,587 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/acl/64dcd7fd9fc755d9ffeb5ce90b3151ce/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:05:38,589 INFO [RS_OPEN_PRIORITY_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1114): Opened 64dcd7fd9fc755d9ffeb5ce90b3151ce; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65512025, jitterRate=-0.023794755339622498}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:05:38,589 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 64dcd7fd9fc755d9ffeb5ce90b3151ce 2024-11-23T17:05:38,592 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 64dcd7fd9fc755d9ffeb5ce90b3151ce: Running coprocessor pre-open hook at 1732381538549Writing region info on filesystem at 1732381538550 (+1 ms)Initializing all the Stores at 1732381538552 (+2 ms)Instantiating store for column family {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732381538552Cleaning up temporary data from old regions at 1732381538571 (+19 ms)Running coprocessor post-open hooks at 1732381538589 (+18 ms)Region opened successfully at 1732381538592 (+3 ms) 2024-11-23T17:05:38,595 INFO [RS_OPEN_PRIORITY_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce., pid=6, masterSystemTime=1732381538484 2024-11-23T17:05:38,604 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce. 2024-11-23T17:05:38,605 INFO [RS_OPEN_PRIORITY_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(153): Opened hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce. 
2024-11-23T17:05:38,605 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=64dcd7fd9fc755d9ffeb5ce90b3151ce, regionState=OPEN, openSeqNum=2, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:05:38,611 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 64dcd7fd9fc755d9ffeb5ce90b3151ce, server=1a7c5470b472,37875,1732381534675 because future has completed 2024-11-23T17:05:38,621 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-23T17:05:38,621 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 64dcd7fd9fc755d9ffeb5ce90b3151ce, server=1a7c5470b472,37875,1732381534675 in 537 msec 2024-11-23T17:05:38,633 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-23T17:05:38,633 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=64dcd7fd9fc755d9ffeb5ce90b3151ce, ASSIGN in 712 msec 2024-11-23T17:05:38,634 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T17:05:38,635 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381538634"}]},"ts":"1732381538634"} 2024-11-23T17:05:38,639 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-11-23T17:05:38,641 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T17:05:38,654 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:acl in 930 msec 2024-11-23T17:05:38,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T17:05:38,880 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: hbase:acl completed 2024-11-23T17:05:38,894 DEBUG [master/1a7c5470b472:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-23T17:05:38,895 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-23T17:05:38,897 INFO [master/1a7c5470b472:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1a7c5470b472,46159,1732381533870-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T17:05:39,592 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T17:05:39,732 WARN [Thread-385 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T17:05:40,051 INFO [Thread-385 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T17:05:40,052 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-23T17:05:40,053 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T17:05:40,077 INFO [Thread-385 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T17:05:40,077 INFO [Thread-385 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T17:05:40,077 INFO [Thread-385 {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T17:05:40,086 INFO [Thread-385 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1300e646{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop.log.dir/,AVAILABLE} 2024-11-23T17:05:40,086 INFO [Thread-385 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@43bd3a04{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-23T17:05:40,157 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T17:05:40,157 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T17:05:40,157 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T17:05:40,171 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T17:05:40,192 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4543a73d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop.log.dir/,AVAILABLE} 2024-11-23T17:05:40,192 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3bd9ebb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-23T17:05:40,285 INFO [Thread-385 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver as a provider class 2024-11-23T17:05:40,285 INFO [Thread-385 {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices as a root resource class 2024-11-23T17:05:40,285 INFO [Thread-385 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-23T17:05:40,288 INFO [Thread-385 {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-23T17:05:40,354 INFO [Thread-385 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-23T17:05:40,778 INFO [Thread-385 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-23T17:05:41,087 INFO [Thread-385 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-23T17:05:41,117 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4ad0a267{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/java.io.tmpdir/jetty-localhost-44875-hadoop-yarn-common-3_4_1_jar-_-any-14615802023156631921/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-11-23T17:05:41,117 INFO [Thread-385 {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@359f3c9c{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/java.io.tmpdir/jetty-localhost-45245-hadoop-yarn-common-3_4_1_jar-_-any-16961863333975600853/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-11-23T17:05:41,118 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@79846073{HTTP/1.1, (http/1.1)}{localhost:44875} 2024-11-23T17:05:41,118 INFO [Time-limited test {}] server.Server(415): Started @14108ms 2024-11-23T17:05:41,118 INFO [Thread-385 {}] 
server.AbstractConnector(333): Started ServerConnector@49b458b8{HTTP/1.1, (http/1.1)}{localhost:45245} 2024-11-23T17:05:41,119 INFO [Thread-385 {}] server.Server(415): Started @14108ms 2024-11-23T17:05:41,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741841_1017 (size=5) 2024-11-23T17:05:41,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741841_1017 (size=5) 2024-11-23T17:05:41,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741841_1017 (size=5) 2024-11-23T17:05:42,598 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-11-23T17:05:42,603 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T17:05:42,644 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-23T17:05:42,645 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T17:05:42,657 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T17:05:42,660 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T17:05:42,660 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T17:05:42,662 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T17:05:42,663 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ec8b503{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop.log.dir/,AVAILABLE} 2024-11-23T17:05:42,664 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2cd7c925{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-23T17:05:42,752 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-11-23T17:05:42,752 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-23T17:05:42,752 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-11-23T17:05:42,753 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-23T17:05:42,795 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-23T17:05:42,815 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:05:42,841 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-23T17:05:42,971 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-23T17:05:42,973 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-11-23T17:05:43,049 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-23T17:05:43,083 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@49461f55{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/java.io.tmpdir/jetty-localhost-37549-hadoop-yarn-common-3_4_1_jar-_-any-18219146918671795654/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-23T17:05:43,091 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@41773a70{HTTP/1.1, (http/1.1)}{localhost:37549} 2024-11-23T17:05:43,091 INFO [Time-limited test {}] server.Server(415): Started @16080ms 2024-11-23T17:05:43,435 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log 
Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-11-23T17:05:43,438 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T17:05:43,458 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-23T17:05:43,459 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T17:05:43,469 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T17:05:43,469 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T17:05:43,469 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T17:05:43,470 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T17:05:43,470 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e5d7b2d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop.log.dir/,AVAILABLE} 2024-11-23T17:05:43,471 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35fb350b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-23T17:05:43,540 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-11-23T17:05:43,540 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-23T17:05:43,540 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-11-23T17:05:43,541 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-23T17:05:43,554 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-23T17:05:43,560 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-23T17:05:43,665 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the 
scope "Singleton" 2024-11-23T17:05:43,671 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@56ef7273{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/java.io.tmpdir/jetty-localhost-43705-hadoop-yarn-common-3_4_1_jar-_-any-17743737627272562812/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-23T17:05:43,672 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6075dd8f{HTTP/1.1, (http/1.1)}{localhost:43705} 2024-11-23T17:05:43,674 INFO [Time-limited test {}] server.Server(415): Started @16661ms 2024-11-23T17:05:43,721 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2341): Mini mapreduce cluster started 2024-11-23T17:05:43,723 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:05:43,769 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=717, OpenFileDescriptor=784, MaxFileDescriptor=1048576, SystemLoadAverage=233, ProcessCount=11, AvailableMemoryMB=7445 2024-11-23T17:05:43,774 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=717 is superior to 500 2024-11-23T17:05:43,781 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-23T17:05:43,790 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 1a7c5470b472,46159,1732381533870 2024-11-23T17:05:43,790 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4d2c776c 2024-11-23T17:05:43,790 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T17:05:43,796 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36400, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T17:05:43,798 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T17:05:43,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:05:43,813 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T17:05:43,814 
INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSplitRegion" procId is: 7 2024-11-23T17:05:43,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-23T17:05:43,817 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T17:05:43,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741842_1018 (size=458) 2024-11-23T17:05:43,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741842_1018 (size=458) 2024-11-23T17:05:43,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741842_1018 (size=458) 2024-11-23T17:05:43,876 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 42e2b13e79d70de0ccaa034b156e7d6e, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1732381543797.42e2b13e79d70de0ccaa034b156e7d6e.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:05:43,885 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 7c56c74f89526c98c0086020fd89c555, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1732381543797.7c56c74f89526c98c0086020fd89c555.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:05:43,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741844_1020 (size=83) 2024-11-23T17:05:43,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-23T17:05:43,930 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741844_1020 (size=83) 2024-11-23T17:05:43,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741844_1020 (size=83) 2024-11-23T17:05:43,935 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1732381543797.7c56c74f89526c98c0086020fd89c555.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:05:43,935 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1722): Closing 7c56c74f89526c98c0086020fd89c555, disabling compactions & flushes 2024-11-23T17:05:43,935 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,1,1732381543797.7c56c74f89526c98c0086020fd89c555. 2024-11-23T17:05:43,935 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1732381543797.7c56c74f89526c98c0086020fd89c555. 2024-11-23T17:05:43,935 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1732381543797.7c56c74f89526c98c0086020fd89c555. after waiting 0 ms 2024-11-23T17:05:43,935 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1732381543797.7c56c74f89526c98c0086020fd89c555. 2024-11-23T17:05:43,935 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1732381543797.7c56c74f89526c98c0086020fd89c555. 
2024-11-23T17:05:43,936 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for 7c56c74f89526c98c0086020fd89c555: Waiting for close lock at 1732381543935Disabling compacts and flushes for region at 1732381543935Disabling writes for close at 1732381543935Writing region close event to WAL at 1732381543935Closed at 1732381543935 2024-11-23T17:05:43,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741843_1019 (size=83) 2024-11-23T17:05:43,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741843_1019 (size=83) 2024-11-23T17:05:43,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741843_1019 (size=83) 2024-11-23T17:05:43,941 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1732381543797.42e2b13e79d70de0ccaa034b156e7d6e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:05:43,941 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing 42e2b13e79d70de0ccaa034b156e7d6e, disabling compactions & flushes 2024-11-23T17:05:43,941 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1732381543797.42e2b13e79d70de0ccaa034b156e7d6e. 2024-11-23T17:05:43,941 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1732381543797.42e2b13e79d70de0ccaa034b156e7d6e. 2024-11-23T17:05:43,942 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1732381543797.42e2b13e79d70de0ccaa034b156e7d6e. after waiting 0 ms 2024-11-23T17:05:43,942 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1732381543797.42e2b13e79d70de0ccaa034b156e7d6e. 2024-11-23T17:05:43,942 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1732381543797.42e2b13e79d70de0ccaa034b156e7d6e. 
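[Editor's note] The create request logged at 17:05:43,798 carries the full descriptor for testtb-testExportFileSystemStateWithSplitRegion: one MOB-enabled family 'cf' (IS_MOB 'true', MOB_THRESHOLD '0', VERSIONS '1', BLOOMFILTER 'ROW') plus the table attributes REGION_REPLICATION '1' and 'hbase.store.file-tracker.impl' => 'DEFAULT', with a single split point '1', which is why two regions (''..'1' and '1'..'') are initialized above. A rough client-side equivalent against the public Admin API, as a sketch only (the connection setup is assumed; everything else mirrors the logged descriptor):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateSplitMobTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"))
              .setRegionReplication(1)
              .setValue("hbase.store.file-tracker.impl", "DEFAULT")
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMobEnabled(true)          // IS_MOB => 'true'
                  .setMobThreshold(0L)          // MOB_THRESHOLD => '0'
                  .setMaxVersions(1)            // VERSIONS => '1'
                  .setBloomFilterType(BloomType.ROW)
                  .build())
              .build();
          // One split point "1" yields the two regions seen in the log: (''..'1') and ('1'..'').
          admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }

The repeated "Checking to see if procedure is done pid=7" lines that follow are the client side of this call polling the master until the CreateTableProcedure (pid=7) finishes.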
2024-11-23T17:05:43,942 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 42e2b13e79d70de0ccaa034b156e7d6e: Waiting for close lock at 1732381543941Disabling compacts and flushes for region at 1732381543941Disabling writes for close at 1732381543942 (+1 ms)Writing region close event to WAL at 1732381543942Closed at 1732381543942 2024-11-23T17:05:43,945 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T17:05:43,945 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1732381543797.7c56c74f89526c98c0086020fd89c555.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1732381543945"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732381543945"}]},"ts":"1732381543945"} 2024-11-23T17:05:43,946 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1732381543797.42e2b13e79d70de0ccaa034b156e7d6e.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1732381543945"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732381543945"}]},"ts":"1732381543945"} 2024-11-23T17:05:44,001 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-23T17:05:44,004 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T17:05:44,005 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381544004"}]},"ts":"1732381544004"} 2024-11-23T17:05:44,009 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-11-23T17:05:44,010 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {1a7c5470b472=0} racks are {/default-rack=0} 2024-11-23T17:05:44,013 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T17:05:44,013 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T17:05:44,013 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T17:05:44,013 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T17:05:44,013 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T17:05:44,013 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T17:05:44,013 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T17:05:44,013 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T17:05:44,013 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T17:05:44,013 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T17:05:44,014 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): 
Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=42e2b13e79d70de0ccaa034b156e7d6e, ASSIGN}, {pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=7c56c74f89526c98c0086020fd89c555, ASSIGN}] 2024-11-23T17:05:44,017 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=42e2b13e79d70de0ccaa034b156e7d6e, ASSIGN 2024-11-23T17:05:44,020 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=7c56c74f89526c98c0086020fd89c555, ASSIGN 2024-11-23T17:05:44,024 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=42e2b13e79d70de0ccaa034b156e7d6e, ASSIGN; state=OFFLINE, location=1a7c5470b472,43453,1732381534825; forceNewPlan=false, retain=false 2024-11-23T17:05:44,025 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=7c56c74f89526c98c0086020fd89c555, ASSIGN; state=OFFLINE, location=1a7c5470b472,37875,1732381534675; forceNewPlan=false, retain=false 2024-11-23T17:05:44,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-23T17:05:44,175 INFO [1a7c5470b472:46159 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-23T17:05:44,175 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=42e2b13e79d70de0ccaa034b156e7d6e, regionState=OPENING, regionLocation=1a7c5470b472,43453,1732381534825 2024-11-23T17:05:44,175 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=7c56c74f89526c98c0086020fd89c555, regionState=OPENING, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:05:44,179 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=42e2b13e79d70de0ccaa034b156e7d6e, ASSIGN because future has completed 2024-11-23T17:05:44,182 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 42e2b13e79d70de0ccaa034b156e7d6e, server=1a7c5470b472,43453,1732381534825}] 2024-11-23T17:05:44,185 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=7c56c74f89526c98c0086020fd89c555, ASSIGN because future has completed 2024-11-23T17:05:44,188 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7c56c74f89526c98c0086020fd89c555, server=1a7c5470b472,37875,1732381534675}] 2024-11-23T17:05:44,343 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:05:44,344 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-11-23T17:05:44,344 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-23T17:05:44,344 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-23T17:05:44,347 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-11-23T17:05:44,347 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 2024-11-23T17:05:44,349 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:05:44,349 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-11-23T17:05:44,350 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering 
adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-23T17:05:44,350 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers 2024-11-23T17:05:44,350 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:05:44,350 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers 2024-11-23T17:05:44,350 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T17:05:44,350 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-23T17:05:44,351 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-23T17:05:44,351 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-23T17:05:44,361 DEBUG [RSProcedureDispatcher-pool-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T17:05:44,374 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,1,1732381543797.7c56c74f89526c98c0086020fd89c555. 2024-11-23T17:05:44,376 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7752): Opening region: {ENCODED => 7c56c74f89526c98c0086020fd89c555, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1732381543797.7c56c74f89526c98c0086020fd89c555.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T17:05:44,377 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,1,1732381543797.7c56c74f89526c98c0086020fd89c555. service=AccessControlService 2024-11-23T17:05:44,377 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T17:05:44,378 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion 7c56c74f89526c98c0086020fd89c555 2024-11-23T17:05:44,378 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1732381543797.7c56c74f89526c98c0086020fd89c555.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:05:44,378 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7794): checking encryption for 7c56c74f89526c98c0086020fd89c555 2024-11-23T17:05:44,378 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7797): checking classloading for 7c56c74f89526c98c0086020fd89c555 2024-11-23T17:05:44,384 INFO [StoreOpener-7c56c74f89526c98c0086020fd89c555-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 7c56c74f89526c98c0086020fd89c555 2024-11-23T17:05:44,385 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60103, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T17:05:44,394 INFO [StoreOpener-7c56c74f89526c98c0086020fd89c555-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7c56c74f89526c98c0086020fd89c555 columnFamilyName cf 2024-11-23T17:05:44,400 DEBUG [StoreOpener-7c56c74f89526c98c0086020fd89c555-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:05:44,402 INFO [StoreOpener-7c56c74f89526c98c0086020fd89c555-1 {}] regionserver.HStore(327): Store=7c56c74f89526c98c0086020fd89c555/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:05:44,403 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1038): replaying wal for 7c56c74f89526c98c0086020fd89c555 2024-11-23T17:05:44,404 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,,1732381543797.42e2b13e79d70de0ccaa034b156e7d6e. 
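[Editor's note] The CompactionConfiguration line above for family 'cf' prints the effective compaction settings (minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0, minCompactSize 128 MB, throttle point 2684354560, major period 604800000 with 0.5 jitter) — the stock defaults. The sketch below maps those logged fields to the configuration keys that would change them; the key-to-field mapping is my reading of the HBase defaults, so treat it as an assumption rather than documentation.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // minFilesToCompact / maxFilesToCompact
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // selection ratios (normal / off-peak)
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        // minCompactSize (128 MB) and major compaction period / jitter
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);        // 7 days in ms
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
        // throttle point: compactions above this size go to the "large" compaction queue
        conf.setLong("hbase.regionserver.thread.compaction.throttle", 2684354560L);
        System.out.println("compaction.min=" + conf.getInt("hbase.hstore.compaction.min", -1));
      }
    }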
2024-11-23T17:05:44,404 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/7c56c74f89526c98c0086020fd89c555 2024-11-23T17:05:44,404 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => 42e2b13e79d70de0ccaa034b156e7d6e, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1732381543797.42e2b13e79d70de0ccaa034b156e7d6e.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T17:05:44,405 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,,1732381543797.42e2b13e79d70de0ccaa034b156e7d6e. service=AccessControlService 2024-11-23T17:05:44,405 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/7c56c74f89526c98c0086020fd89c555 2024-11-23T17:05:44,405 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:05:44,405 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion 42e2b13e79d70de0ccaa034b156e7d6e 2024-11-23T17:05:44,406 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1732381543797.42e2b13e79d70de0ccaa034b156e7d6e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:05:44,406 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for 42e2b13e79d70de0ccaa034b156e7d6e 2024-11-23T17:05:44,406 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for 42e2b13e79d70de0ccaa034b156e7d6e 2024-11-23T17:05:44,406 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1048): stopping wal replay for 7c56c74f89526c98c0086020fd89c555 2024-11-23T17:05:44,406 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1060): Cleaning up temporary data for 7c56c74f89526c98c0086020fd89c555 2024-11-23T17:05:44,410 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1093): writing seq id for 7c56c74f89526c98c0086020fd89c555 2024-11-23T17:05:44,416 INFO [StoreOpener-42e2b13e79d70de0ccaa034b156e7d6e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 42e2b13e79d70de0ccaa034b156e7d6e 2024-11-23T17:05:44,417 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/7c56c74f89526c98c0086020fd89c555/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:05:44,418 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1114): Opened 7c56c74f89526c98c0086020fd89c555; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69598279, jitterRate=0.03709517419338226}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:05:44,418 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7c56c74f89526c98c0086020fd89c555 2024-11-23T17:05:44,418 INFO [StoreOpener-42e2b13e79d70de0ccaa034b156e7d6e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 42e2b13e79d70de0ccaa034b156e7d6e columnFamilyName cf 2024-11-23T17:05:44,419 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1006): Region open journal for 7c56c74f89526c98c0086020fd89c555: Running coprocessor pre-open hook at 1732381544378Writing region info on filesystem at 1732381544378Initializing all the Stores at 1732381544381 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381544381Cleaning up temporary data from old regions at 1732381544406 (+25 ms)Running coprocessor post-open hooks at 1732381544418 (+12 ms)Region opened successfully at 1732381544419 (+1 ms) 2024-11-23T17:05:44,420 DEBUG [StoreOpener-42e2b13e79d70de0ccaa034b156e7d6e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:05:44,421 INFO [StoreOpener-42e2b13e79d70de0ccaa034b156e7d6e-1 {}] regionserver.HStore(327): Store=42e2b13e79d70de0ccaa034b156e7d6e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:05:44,421 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2236): 
Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,1,1732381543797.7c56c74f89526c98c0086020fd89c555., pid=11, masterSystemTime=1732381544360 2024-11-23T17:05:44,421 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for 42e2b13e79d70de0ccaa034b156e7d6e 2024-11-23T17:05:44,422 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/42e2b13e79d70de0ccaa034b156e7d6e 2024-11-23T17:05:44,423 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/42e2b13e79d70de0ccaa034b156e7d6e 2024-11-23T17:05:44,424 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for 42e2b13e79d70de0ccaa034b156e7d6e 2024-11-23T17:05:44,424 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for 42e2b13e79d70de0ccaa034b156e7d6e 2024-11-23T17:05:44,425 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,1,1732381543797.7c56c74f89526c98c0086020fd89c555. 2024-11-23T17:05:44,425 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,1,1732381543797.7c56c74f89526c98c0086020fd89c555. 2024-11-23T17:05:44,426 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=7c56c74f89526c98c0086020fd89c555, regionState=OPEN, openSeqNum=2, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:05:44,428 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for 42e2b13e79d70de0ccaa034b156e7d6e 2024-11-23T17:05:44,431 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7c56c74f89526c98c0086020fd89c555, server=1a7c5470b472,37875,1732381534675 because future has completed 2024-11-23T17:05:44,435 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46159 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=1a7c5470b472,37875,1732381534675, table=testtb-testExportFileSystemStateWithSplitRegion, region=7c56c74f89526c98c0086020fd89c555. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-11-23T17:05:44,434 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/42e2b13e79d70de0ccaa034b156e7d6e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:05:44,436 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1114): Opened 42e2b13e79d70de0ccaa034b156e7d6e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73789802, jitterRate=0.09955373406410217}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:05:44,436 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 42e2b13e79d70de0ccaa034b156e7d6e 2024-11-23T17:05:44,437 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for 42e2b13e79d70de0ccaa034b156e7d6e: Running coprocessor pre-open hook at 1732381544406Writing region info on filesystem at 1732381544406Initializing all the Stores at 1732381544409 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381544409Cleaning up temporary data from old regions at 1732381544424 (+15 ms)Running coprocessor post-open hooks at 1732381544436 (+12 ms)Region opened successfully at 1732381544436 2024-11-23T17:05:44,442 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,,1732381543797.42e2b13e79d70de0ccaa034b156e7d6e., pid=10, masterSystemTime=1732381544361 2024-11-23T17:05:44,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-23T17:05:44,451 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=9 2024-11-23T17:05:44,451 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure 7c56c74f89526c98c0086020fd89c555, server=1a7c5470b472,37875,1732381534675 in 254 msec 2024-11-23T17:05:44,452 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=42e2b13e79d70de0ccaa034b156e7d6e, regionState=OPEN, openSeqNum=2, regionLocation=1a7c5470b472,43453,1732381534825 2024-11-23T17:05:44,453 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,,1732381543797.42e2b13e79d70de0ccaa034b156e7d6e. 
2024-11-23T17:05:44,453 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,,1732381543797.42e2b13e79d70de0ccaa034b156e7d6e. 2024-11-23T17:05:44,455 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=7c56c74f89526c98c0086020fd89c555, ASSIGN in 438 msec 2024-11-23T17:05:44,456 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 42e2b13e79d70de0ccaa034b156e7d6e, server=1a7c5470b472,43453,1732381534825 because future has completed 2024-11-23T17:05:44,466 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=8 2024-11-23T17:05:44,466 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=8, state=SUCCESS, hasLock=false; OpenRegionProcedure 42e2b13e79d70de0ccaa034b156e7d6e, server=1a7c5470b472,43453,1732381534825 in 277 msec 2024-11-23T17:05:44,470 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-23T17:05:44,471 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=42e2b13e79d70de0ccaa034b156e7d6e, ASSIGN in 452 msec 2024-11-23T17:05:44,472 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T17:05:44,472 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381544472"}]},"ts":"1732381544472"} 2024-11-23T17:05:44,481 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-11-23T17:05:44,482 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T17:05:44,486 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-11-23T17:05:44,501 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce., hostname=1a7c5470b472,37875,1732381534675, seqNum=2] 2024-11-23T17:05:44,509 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37875 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:05:44,510 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37875 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, 
seqNum=-1] 2024-11-23T17:05:44,510 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37875 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:05:44,512 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33427, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-11-23T17:05:44,517 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce., hostname=1a7c5470b472,37875,1732381534675, seqNum=2] 2024-11-23T17:05:44,520 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37875 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-11-23T17:05:44,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-23T17:05:44,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-23T17:05:44,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-23T17:05:44,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:05:44,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:05:44,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:05:44,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-23T17:05:44,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:05:44,563 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 755 msec 2024-11-23T17:05:44,563 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 
\x03 \x04 2024-11-23T17:05:44,563 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:05:44,569 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:05:44,569 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:05:44,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-23T17:05:44,959 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-11-23T17:05:44,963 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-23T17:05:44,971 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:05:44,971 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSplitRegion,,1732381543797.42e2b13e79d70de0ccaa034b156e7d6e. 
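The entries above confirm that CreateTableProcedure pid=7 finished and that testtb-testExportFileSystemStateWithSplitRegion came up pre-split into two regions, ['', '1') and ['1', ''), each with the single column family cf. For context, here is a minimal sketch of how a client could issue an equivalent pre-split create through the public HBase Admin API; it assumes an hbase-site.xml on the classpath, and every descriptor option beyond the family name and the split key '1' is illustrative rather than taken from this log:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreatePreSplitTable {
  public static void main(String[] args) throws Exception {
    // Assumes the client configuration on the classpath points at the target cluster.
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
      // Single column family 'cf', matching the descriptor logged during region open.
      TableDescriptor desc = TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
          .build();
      // One explicit split key '1' yields the two regions seen in the log:
      // one covering ['', '1') and one covering ['1', '').
      byte[][] splitKeys = { Bytes.toBytes("1") };
      admin.createTable(desc, splitKeys);
    }
  }
}
```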
2024-11-23T17:05:44,972 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:05:44,976 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-23T17:05:44,994 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-23T17:05:45,003 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:05:45,013 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45718, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:05:45,021 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-23T17:05:45,034 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-11-23T17:05:45,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732381545034 (current time:1732381545034). 
2024-11-23T17:05:45,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:05:45,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-11-23T17:05:45,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:05:45,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@421ead78, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:05:45,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:05:45,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:05:45,045 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:05:45,046 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:05:45,046 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:05:45,047 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@746cc392, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:05:45,047 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:05:45,047 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:05:45,047 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:05:45,049 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36414, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:05:45,051 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@483af296, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:05:45,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:05:45,059 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:05:45,059 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:05:45,061 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36692, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:05:45,063 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159. 2024-11-23T17:05:45,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:05:45,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:05:45,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:05:45,071 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T17:05:45,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3850c52c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:05:45,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:05:45,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:05:45,078 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:05:45,078 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:05:45,078 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:05:45,079 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b19120d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:05:45,079 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:05:45,079 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:05:45,079 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:05:45,080 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36420, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:05:45,081 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@433a05a0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:05:45,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:05:45,083 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:05:45,084 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:05:45,085 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36706, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-23T17:05:45,087 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce., hostname=1a7c5470b472,37875,1732381534675, seqNum=2] 2024-11-23T17:05:45,089 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159. 2024-11-23T17:05:45,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:05:45,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:05:45,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:05:45,090 INFO [Registry-endpoints-refresh-end-points 
{}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:05:45,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-11-23T17:05:45,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T17:05:45,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-11-23T17:05:45,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-11-23T17:05:45,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-23T17:05:45,106 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:05:45,111 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:05:45,126 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:05:45,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741845_1021 (size=215) 2024-11-23T17:05:45,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741845_1021 (size=215) 2024-11-23T17:05:45,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741845_1021 (size=215) 2024-11-23T17:05:45,148 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:05:45,153 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, 
state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 42e2b13e79d70de0ccaa034b156e7d6e}, {pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7c56c74f89526c98c0086020fd89c555}] 2024-11-23T17:05:45,158 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 42e2b13e79d70de0ccaa034b156e7d6e 2024-11-23T17:05:45,158 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7c56c74f89526c98c0086020fd89c555 2024-11-23T17:05:45,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-23T17:05:45,315 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37875 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=14 2024-11-23T17:05:45,315 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43453 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=13 2024-11-23T17:05:45,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1732381543797.7c56c74f89526c98c0086020fd89c555. 2024-11-23T17:05:45,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1732381543797.42e2b13e79d70de0ccaa034b156e7d6e. 2024-11-23T17:05:45,322 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 7c56c74f89526c98c0086020fd89c555: 2024-11-23T17:05:45,322 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.HRegion(2603): Flush status journal for 42e2b13e79d70de0ccaa034b156e7d6e: 2024-11-23T17:05:45,322 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1732381543797.7c56c74f89526c98c0086020fd89c555. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-11-23T17:05:45,322 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1732381543797.42e2b13e79d70de0ccaa034b156e7d6e. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-11-23T17:05:45,323 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1732381543797.42e2b13e79d70de0ccaa034b156e7d6e.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T17:05:45,323 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1732381543797.7c56c74f89526c98c0086020fd89c555.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T17:05:45,326 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:05:45,326 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:05:45,329 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:05:45,329 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:05:45,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741847_1023 (size=86) 2024-11-23T17:05:45,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741846_1022 (size=86) 2024-11-23T17:05:45,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741847_1023 (size=86) 2024-11-23T17:05:45,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741846_1022 (size=86) 2024-11-23T17:05:45,348 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1732381543797.42e2b13e79d70de0ccaa034b156e7d6e. 2024-11-23T17:05:45,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741847_1023 (size=86) 2024-11-23T17:05:45,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741846_1022 (size=86) 2024-11-23T17:05:45,349 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1732381543797.7c56c74f89526c98c0086020fd89c555. 
2024-11-23T17:05:45,351 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-23T17:05:45,351 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-23T17:05:45,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=13 2024-11-23T17:05:45,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-23T17:05:45,354 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region 7c56c74f89526c98c0086020fd89c555 2024-11-23T17:05:45,354 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region 42e2b13e79d70de0ccaa034b156e7d6e 2024-11-23T17:05:45,354 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7c56c74f89526c98c0086020fd89c555 2024-11-23T17:05:45,354 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 42e2b13e79d70de0ccaa034b156e7d6e 2024-11-23T17:05:45,359 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 42e2b13e79d70de0ccaa034b156e7d6e in 203 msec 2024-11-23T17:05:45,362 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=12 2024-11-23T17:05:45,362 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7c56c74f89526c98c0086020fd89c555 in 203 msec 2024-11-23T17:05:45,362 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:05:45,364 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:05:45,366 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-23T17:05:45,366 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-23T17:05:45,366 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:05:45,367 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-23T17:05:45,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741848_1024 (size=78) 2024-11-23T17:05:45,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741848_1024 (size=78) 2024-11-23T17:05:45,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741848_1024 (size=78) 2024-11-23T17:05:45,381 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:05:45,381 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T17:05:45,384 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T17:05:45,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741849_1025 (size=713) 2024-11-23T17:05:45,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741849_1025 (size=713) 2024-11-23T17:05:45,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741849_1025 (size=713) 2024-11-23T17:05:45,415 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:05:45,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-23T17:05:45,429 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:05:45,430 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T17:05:45,434 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:05:45,434 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-11-23T17:05:45,437 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 335 msec 2024-11-23T17:05:45,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-23T17:05:45,728 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-11-23T17:05:45,746 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43453 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,,1732381543797.42e2b13e79d70de0ccaa034b156e7d6e. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:05:45,752 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37875 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,1,1732381543797.7c56c74f89526c98c0086020fd89c555. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:05:45,756 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-23T17:05:45,761 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:05:45,761 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSplitRegion,,1732381543797.42e2b13e79d70de0ccaa034b156e7d6e. 
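The two "writing data to region ... with WAL disabled" entries above come from the test loading rows into both regions with per-mutation durability set to skip the write-ahead log, which is why the region server warns that data may be lost in a crash. A minimal, hedged sketch of a client write that triggers that warning; the row key, qualifier, and value are illustrative, not taken from the log:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPut {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(
             TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"))) {
      Put put = new Put(Bytes.toBytes("row-0"));  // illustrative row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      // Skipping the WAL trades durability for speed; it is what produces the
      // "Data may be lost in the event of a crash" message recorded above.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}
```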
2024-11-23T17:05:45,761 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:05:45,763 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-23T17:05:45,771 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-23T17:05:45,779 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-23T17:05:45,783 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-11-23T17:05:45,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732381545783 (current time:1732381545783). 2024-11-23T17:05:45,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:05:45,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-11-23T17:05:45,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:05:45,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c393294, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:05:45,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:05:45,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:05:45,785 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:05:45,786 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:05:45,786 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:05:45,786 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f425071, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:05:45,786 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:05:45,786 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:05:45,787 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:05:45,788 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36430, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:05:45,789 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58977ea4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:05:45,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:05:45,791 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:05:45,791 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:05:45,793 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36710, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:05:45,794 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159. 
2024-11-23T17:05:45,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:05:45,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:05:45,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:05:45,795 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:05:45,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7876c924, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:05:45,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:05:45,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:05:45,797 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:05:45,797 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:05:45,798 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:05:45,798 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49e2995, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:05:45,798 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:05:45,798 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:05:45,798 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:05:45,799 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36444, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:05:45,800 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71a61444, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:05:45,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:05:45,802 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:05:45,803 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:05:45,804 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36724, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:05:45,807 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce., hostname=1a7c5470b472,37875,1732381534675, seqNum=2] 2024-11-23T17:05:45,808 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159. 
2024-11-23T17:05:45,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:05:45,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:05:45,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:05:45,809 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:05:45,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-11-23T17:05:45,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
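[Editor's sketch, not part of this log] The entries above show MasterRpcServices.snapshot validating the snapshot description, checking ACLs for 'testtb-testExportFileSystemStateWithSplitRegion', and then SnapshotManager registering SnapshotProcedure pid=15 for the FLUSH snapshot. As an illustrative, hedged sketch of the client side of that request using the standard HBase Admin API (only the table and snapshot names are taken from the log; the class name and everything else is assumed, and the test itself may drive this through the async admin instead):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // try-with-resources closes both the Admin handle and the Connection.
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Synchronous snapshot request; the master executes it as a
          // SnapshotProcedure (pid=15 in this log) and the call returns
          // only after the procedure has finished.
          admin.snapshot("snaptb0-testExportFileSystemStateWithSplitRegion",
              TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"));
        }
      }
    }
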
2024-11-23T17:05:45,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-11-23T17:05:45,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-11-23T17:05:45,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-23T17:05:45,814 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:05:45,815 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:05:45,819 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:05:45,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741850_1026 (size=210) 2024-11-23T17:05:45,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741850_1026 (size=210) 2024-11-23T17:05:45,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741850_1026 (size=210) 2024-11-23T17:05:45,833 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:05:45,833 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 42e2b13e79d70de0ccaa034b156e7d6e}, {pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7c56c74f89526c98c0086020fd89c555}] 2024-11-23T17:05:45,834 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7c56c74f89526c98c0086020fd89c555 2024-11-23T17:05:45,834 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for 
pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 42e2b13e79d70de0ccaa034b156e7d6e 2024-11-23T17:05:45,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-23T17:05:45,987 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37875 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=17 2024-11-23T17:05:45,987 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43453 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=16 2024-11-23T17:05:45,988 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1732381543797.42e2b13e79d70de0ccaa034b156e7d6e. 2024-11-23T17:05:45,988 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1732381543797.7c56c74f89526c98c0086020fd89c555. 2024-11-23T17:05:45,993 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2902): Flushing 7c56c74f89526c98c0086020fd89c555 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-11-23T17:05:45,993 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2902): Flushing 42e2b13e79d70de0ccaa034b156e7d6e 1/1 column families, dataSize=132 B heapSize=544 B 2024-11-23T17:05:46,080 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411238e7008dd9bb54cec88ca24554a3f0710_7c56c74f89526c98c0086020fd89c555 is 71, key is 15584ccc9b5a7799eb7828e4ddd62761/cf:q/1732381545752/Put/seqid=0 2024-11-23T17:05:46,080 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123a9e4e5323c844acd8706007974c1a587_42e2b13e79d70de0ccaa034b156e7d6e is 71, key is 001d5d682a189eeef680573e9e9704ae/cf:q/1732381545746/Put/seqid=0 2024-11-23T17:05:46,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741852_1028 (size=8241) 2024-11-23T17:05:46,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741852_1028 (size=8241) 2024-11-23T17:05:46,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741852_1028 (size=8241) 2024-11-23T17:05:46,121 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:05:46,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741851_1027 (size=5032) 2024-11-23T17:05:46,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741851_1027 (size=5032) 2024-11-23T17:05:46,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-23T17:05:46,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741851_1027 (size=5032) 2024-11-23T17:05:46,136 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:05:46,214 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123a9e4e5323c844acd8706007974c1a587_42e2b13e79d70de0ccaa034b156e7d6e to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e20241123a9e4e5323c844acd8706007974c1a587_42e2b13e79d70de0ccaa034b156e7d6e 2024-11-23T17:05:46,214 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411238e7008dd9bb54cec88ca24554a3f0710_7c56c74f89526c98c0086020fd89c555 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b202411238e7008dd9bb54cec88ca24554a3f0710_7c56c74f89526c98c0086020fd89c555 2024-11-23T17:05:46,217 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/7c56c74f89526c98c0086020fd89c555/.tmp/cf/ee5c8540c4d14a6ba8f6a6339f60d0f8, store: [table=testtb-testExportFileSystemStateWithSplitRegion family=cf region=7c56c74f89526c98c0086020fd89c555] 2024-11-23T17:05:46,217 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/42e2b13e79d70de0ccaa034b156e7d6e/.tmp/cf/84fa8d68826649708ae50439ceb84146, store: [table=testtb-testExportFileSystemStateWithSplitRegion family=cf region=42e2b13e79d70de0ccaa034b156e7d6e] 2024-11-23T17:05:46,236 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 
{event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/42e2b13e79d70de0ccaa034b156e7d6e/.tmp/cf/84fa8d68826649708ae50439ceb84146 is 224, key is 017f22cf50405abf12022ca5466d18416/cf:q/1732381545746/Put/seqid=0 2024-11-23T17:05:46,237 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/7c56c74f89526c98c0086020fd89c555/.tmp/cf/ee5c8540c4d14a6ba8f6a6339f60d0f8 is 224, key is 16b02dcd1c709dd2c65156e173aef03c5/cf:q/1732381545752/Put/seqid=0 2024-11-23T17:05:46,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741853_1029 (size=15937) 2024-11-23T17:05:46,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741854_1030 (size=5754) 2024-11-23T17:05:46,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741854_1030 (size=5754) 2024-11-23T17:05:46,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741853_1029 (size=15937) 2024-11-23T17:05:46,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741853_1029 (size=15937) 2024-11-23T17:05:46,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741854_1030 (size=5754) 2024-11-23T17:05:46,261 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=132, hasBloomFilter=true, into tmp file hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/42e2b13e79d70de0ccaa034b156e7d6e/.tmp/cf/84fa8d68826649708ae50439ceb84146 2024-11-23T17:05:46,261 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/7c56c74f89526c98c0086020fd89c555/.tmp/cf/ee5c8540c4d14a6ba8f6a6339f60d0f8 2024-11-23T17:05:46,281 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/7c56c74f89526c98c0086020fd89c555/.tmp/cf/ee5c8540c4d14a6ba8f6a6339f60d0f8 as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/7c56c74f89526c98c0086020fd89c555/cf/ee5c8540c4d14a6ba8f6a6339f60d0f8 2024-11-23T17:05:46,281 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 
{event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/42e2b13e79d70de0ccaa034b156e7d6e/.tmp/cf/84fa8d68826649708ae50439ceb84146 as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/42e2b13e79d70de0ccaa034b156e7d6e/cf/84fa8d68826649708ae50439ceb84146 2024-11-23T17:05:46,294 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/7c56c74f89526c98c0086020fd89c555/cf/ee5c8540c4d14a6ba8f6a6339f60d0f8, entries=48, sequenceid=6, filesize=15.6 K 2024-11-23T17:05:46,294 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/42e2b13e79d70de0ccaa034b156e7d6e/cf/84fa8d68826649708ae50439ceb84146, entries=2, sequenceid=6, filesize=5.6 K 2024-11-23T17:05:46,305 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 7c56c74f89526c98c0086020fd89c555 in 310ms, sequenceid=6, compaction requested=false 2024-11-23T17:05:46,305 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 42e2b13e79d70de0ccaa034b156e7d6e in 310ms, sequenceid=6, compaction requested=false 2024-11-23T17:05:46,305 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSplitRegion' 2024-11-23T17:05:46,305 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSplitRegion' 2024-11-23T17:05:46,307 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2603): Flush status journal for 42e2b13e79d70de0ccaa034b156e7d6e: 2024-11-23T17:05:46,307 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2603): Flush status journal for 7c56c74f89526c98c0086020fd89c555: 2024-11-23T17:05:46,307 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1732381543797.42e2b13e79d70de0ccaa034b156e7d6e. for snaptb0-testExportFileSystemStateWithSplitRegion completed. 
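[Editor's sketch, not part of this log] The two SnapshotRegionCallable tasks above flush each region's memstore and commit the resulting store files before taking snapshot references, because the snapshot was requested with type=FLUSH. For comparison only, a hedged sketch of an explicit flush of the same table through the public Admin API (the helper class and method are assumed, only the table name comes from the log):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    final class FlushSketch {
      // Forces every region of the table to write its memstore to HFiles --
      // the same kind of per-region flush the FLUSH-type snapshot performs above.
      static void flushTable(Admin admin) throws IOException {
        admin.flush(TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"));
      }
    }
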
2024-11-23T17:05:46,307 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1732381543797.7c56c74f89526c98c0086020fd89c555. for snaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-11-23T17:05:46,307 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1732381543797.7c56c74f89526c98c0086020fd89c555.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T17:05:46,307 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1732381543797.42e2b13e79d70de0ccaa034b156e7d6e.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T17:05:46,307 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:05:46,307 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:05:46,308 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/42e2b13e79d70de0ccaa034b156e7d6e/cf/84fa8d68826649708ae50439ceb84146] hfiles 2024-11-23T17:05:46,308 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/7c56c74f89526c98c0086020fd89c555/cf/ee5c8540c4d14a6ba8f6a6339f60d0f8] hfiles 2024-11-23T17:05:46,310 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/42e2b13e79d70de0ccaa034b156e7d6e/cf/84fa8d68826649708ae50439ceb84146 for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T17:05:46,310 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/7c56c74f89526c98c0086020fd89c555/cf/ee5c8540c4d14a6ba8f6a6339f60d0f8 for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T17:05:46,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741855_1031 (size=125) 2024-11-23T17:05:46,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741855_1031 (size=125) 2024-11-23T17:05:46,402 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741855_1031 (size=125) 2024-11-23T17:05:46,403 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1732381543797.7c56c74f89526c98c0086020fd89c555. 2024-11-23T17:05:46,403 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-23T17:05:46,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=17 2024-11-23T17:05:46,404 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region 7c56c74f89526c98c0086020fd89c555 2024-11-23T17:05:46,404 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7c56c74f89526c98c0086020fd89c555 2024-11-23T17:05:46,413 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7c56c74f89526c98c0086020fd89c555 in 573 msec 2024-11-23T17:05:46,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741856_1032 (size=125) 2024-11-23T17:05:46,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741856_1032 (size=125) 2024-11-23T17:05:46,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741856_1032 (size=125) 2024-11-23T17:05:46,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-23T17:05:46,455 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1732381543797.42e2b13e79d70de0ccaa034b156e7d6e. 
2024-11-23T17:05:46,455 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=16 2024-11-23T17:05:46,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=16 2024-11-23T17:05:46,457 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region 42e2b13e79d70de0ccaa034b156e7d6e 2024-11-23T17:05:46,458 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 42e2b13e79d70de0ccaa034b156e7d6e 2024-11-23T17:05:46,465 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=16, resume processing ppid=15 2024-11-23T17:05:46,465 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 42e2b13e79d70de0ccaa034b156e7d6e in 627 msec 2024-11-23T17:05:46,466 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:05:46,467 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:05:46,475 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-23T17:05:46,475 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-23T17:05:46,476 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:05:46,484 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b202411238e7008dd9bb54cec88ca24554a3f0710_7c56c74f89526c98c0086020fd89c555, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e20241123a9e4e5323c844acd8706007974c1a587_42e2b13e79d70de0ccaa034b156e7d6e] hfiles 2024-11-23T17:05:46,484 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b202411238e7008dd9bb54cec88ca24554a3f0710_7c56c74f89526c98c0086020fd89c555 2024-11-23T17:05:46,485 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e20241123a9e4e5323c844acd8706007974c1a587_42e2b13e79d70de0ccaa034b156e7d6e 2024-11-23T17:05:46,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741857_1033 (size=309) 2024-11-23T17:05:46,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741857_1033 (size=309) 2024-11-23T17:05:46,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741857_1033 (size=309) 2024-11-23T17:05:46,563 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:05:46,564 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T17:05:46,565 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T17:05:46,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741858_1034 (size=1023) 2024-11-23T17:05:46,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38951 is added to blk_1073741858_1034 (size=1023) 2024-11-23T17:05:46,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741858_1034 (size=1023) 2024-11-23T17:05:46,655 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:05:46,681 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:05:46,682 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T17:05:46,686 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:05:46,686 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-11-23T17:05:46,689 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 875 msec 2024-11-23T17:05:46,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-23T17:05:46,958 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-11-23T17:05:46,980 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T17:05:46,981 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T17:05:46,981 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T17:05:46,982 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36740, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T17:05:46,983 INFO 
[MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45730, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T17:05:46,983 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37875 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-23T17:05:46,983 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43453 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-23T17:05:46,983 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44610, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T17:05:46,983 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44711 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-23T17:05:46,986 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportFileSystemStateWithSplitRegion', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T17:05:46,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-23T17:05:46,989 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T17:05:46,989 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:05:46,989 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportFileSystemStateWithSplitRegion" procId is: 18 2024-11-23T17:05:46,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-23T17:05:46,990 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T17:05:47,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741859_1035 (size=390) 2024-11-23T17:05:47,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741859_1035 (size=390) 2024-11-23T17:05:47,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741859_1035 (size=390) 2024-11-23T17:05:47,002 INFO 
[RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 5b0c07975ee7c61139f3fcd802f8c8d2, NAME => 'testExportFileSystemStateWithSplitRegion,,1732381546985.5b0c07975ee7c61139f3fcd802f8c8d2.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:05:47,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741860_1036 (size=75) 2024-11-23T17:05:47,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741860_1036 (size=75) 2024-11-23T17:05:47,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741860_1036 (size=75) 2024-11-23T17:05:47,020 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1732381546985.5b0c07975ee7c61139f3fcd802f8c8d2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:05:47,020 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing 5b0c07975ee7c61139f3fcd802f8c8d2, disabling compactions & flushes 2024-11-23T17:05:47,020 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1732381546985.5b0c07975ee7c61139f3fcd802f8c8d2. 2024-11-23T17:05:47,020 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1732381546985.5b0c07975ee7c61139f3fcd802f8c8d2. 2024-11-23T17:05:47,020 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1732381546985.5b0c07975ee7c61139f3fcd802f8c8d2. after waiting 0 ms 2024-11-23T17:05:47,020 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1732381546985.5b0c07975ee7c61139f3fcd802f8c8d2. 2024-11-23T17:05:47,021 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1732381546985.5b0c07975ee7c61139f3fcd802f8c8d2. 
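[Editor's sketch, not part of this log] The entries from 17:05:46,986 onward show CreateTableProcedure pid=18 building 'testExportFileSystemStateWithSplitRegion' with a single family 'cf' (VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => 64 KB). A hedged sketch of the equivalent client-side request with the standard descriptor builders; only the table name, family name, and the attributes quoted in the log are taken from it, the wrapper class and method are assumed:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    final class CreateTableSketch {
      static void createTable(Admin admin) throws IOException {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testExportFileSystemStateWithSplitRegion"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)                  // VERSIONS => '1'
                .setBloomFilterType(BloomType.ROW)  // BLOOMFILTER => 'ROW'
                .setBlocksize(64 * 1024)            // BLOCKSIZE => '65536 B (64KB)'
                .build())
            .build();
        // The master runs this as a CreateTableProcedure (pid=18 here):
        // pre-operation, write FS layout, add to meta, assign regions, post-operation.
        admin.createTable(desc);
      }
    }
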
2024-11-23T17:05:47,021 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 5b0c07975ee7c61139f3fcd802f8c8d2: Waiting for close lock at 1732381547020Disabling compacts and flushes for region at 1732381547020Disabling writes for close at 1732381547020Writing region close event to WAL at 1732381547020Closed at 1732381547020 2024-11-23T17:05:47,023 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T17:05:47,023 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportFileSystemStateWithSplitRegion,,1732381546985.5b0c07975ee7c61139f3fcd802f8c8d2.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1732381547023"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732381547023"}]},"ts":"1732381547023"} 2024-11-23T17:05:47,027 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-23T17:05:47,029 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T17:05:47,030 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381547029"}]},"ts":"1732381547029"} 2024-11-23T17:05:47,033 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-11-23T17:05:47,033 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {1a7c5470b472=0} racks are {/default-rack=0} 2024-11-23T17:05:47,035 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T17:05:47,035 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T17:05:47,035 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T17:05:47,035 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T17:05:47,035 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T17:05:47,035 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T17:05:47,035 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T17:05:47,035 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T17:05:47,035 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T17:05:47,035 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T17:05:47,035 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=5b0c07975ee7c61139f3fcd802f8c8d2, ASSIGN}] 2024-11-23T17:05:47,037 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=5b0c07975ee7c61139f3fcd802f8c8d2, ASSIGN 2024-11-23T17:05:47,038 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=5b0c07975ee7c61139f3fcd802f8c8d2, ASSIGN; state=OFFLINE, location=1a7c5470b472,37875,1732381534675; forceNewPlan=false, retain=false 2024-11-23T17:05:47,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-23T17:05:47,189 INFO [1a7c5470b472:46159 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-23T17:05:47,189 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=5b0c07975ee7c61139f3fcd802f8c8d2, regionState=OPENING, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:05:47,193 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=5b0c07975ee7c61139f3fcd802f8c8d2, ASSIGN because future has completed 2024-11-23T17:05:47,193 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5b0c07975ee7c61139f3fcd802f8c8d2, server=1a7c5470b472,37875,1732381534675}] 2024-11-23T17:05:47,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-23T17:05:47,355 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1732381546985.5b0c07975ee7c61139f3fcd802f8c8d2. 2024-11-23T17:05:47,355 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7752): Opening region: {ENCODED => 5b0c07975ee7c61139f3fcd802f8c8d2, NAME => 'testExportFileSystemStateWithSplitRegion,,1732381546985.5b0c07975ee7c61139f3fcd802f8c8d2.', STARTKEY => '', ENDKEY => ''} 2024-11-23T17:05:47,356 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1732381546985.5b0c07975ee7c61139f3fcd802f8c8d2. service=AccessControlService 2024-11-23T17:05:47,356 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T17:05:47,357 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 5b0c07975ee7c61139f3fcd802f8c8d2 2024-11-23T17:05:47,357 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1732381546985.5b0c07975ee7c61139f3fcd802f8c8d2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:05:47,357 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7794): checking encryption for 5b0c07975ee7c61139f3fcd802f8c8d2 2024-11-23T17:05:47,357 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7797): checking classloading for 5b0c07975ee7c61139f3fcd802f8c8d2 2024-11-23T17:05:47,369 INFO [StoreOpener-5b0c07975ee7c61139f3fcd802f8c8d2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5b0c07975ee7c61139f3fcd802f8c8d2 2024-11-23T17:05:47,373 INFO [StoreOpener-5b0c07975ee7c61139f3fcd802f8c8d2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5b0c07975ee7c61139f3fcd802f8c8d2 columnFamilyName cf 2024-11-23T17:05:47,373 DEBUG [StoreOpener-5b0c07975ee7c61139f3fcd802f8c8d2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:05:47,374 INFO [StoreOpener-5b0c07975ee7c61139f3fcd802f8c8d2-1 {}] regionserver.HStore(327): Store=5b0c07975ee7c61139f3fcd802f8c8d2/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:05:47,374 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1038): replaying wal for 5b0c07975ee7c61139f3fcd802f8c8d2 2024-11-23T17:05:47,375 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/5b0c07975ee7c61139f3fcd802f8c8d2 2024-11-23T17:05:47,376 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/5b0c07975ee7c61139f3fcd802f8c8d2 2024-11-23T17:05:47,377 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1048): stopping wal replay for 5b0c07975ee7c61139f3fcd802f8c8d2 2024-11-23T17:05:47,377 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1060): Cleaning up temporary data for 5b0c07975ee7c61139f3fcd802f8c8d2 2024-11-23T17:05:47,386 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1093): writing seq id for 5b0c07975ee7c61139f3fcd802f8c8d2 2024-11-23T17:05:47,394 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/5b0c07975ee7c61139f3fcd802f8c8d2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:05:47,395 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1114): Opened 5b0c07975ee7c61139f3fcd802f8c8d2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62042425, jitterRate=-0.07549582421779633}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:05:47,395 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5b0c07975ee7c61139f3fcd802f8c8d2 2024-11-23T17:05:47,396 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1006): Region open journal for 5b0c07975ee7c61139f3fcd802f8c8d2: Running coprocessor pre-open hook at 1732381547357Writing region info on filesystem at 1732381547357Initializing all the Stores at 1732381547359 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381547359Cleaning up temporary data from old regions at 1732381547377 (+18 ms)Running coprocessor post-open hooks at 1732381547395 (+18 ms)Region opened successfully at 1732381547396 (+1 ms) 2024-11-23T17:05:47,400 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1732381546985.5b0c07975ee7c61139f3fcd802f8c8d2., pid=20, masterSystemTime=1732381547346 2024-11-23T17:05:47,403 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1732381546985.5b0c07975ee7c61139f3fcd802f8c8d2. 2024-11-23T17:05:47,403 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1732381546985.5b0c07975ee7c61139f3fcd802f8c8d2. 
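[Editor's sketch, not part of this log] With OpenRegionProcedure pid=20 complete, the new table's single region 5b0c07975ee7c61139f3fcd802f8c8d2 is online on 1a7c5470b472,37875. As an assumed sketch (the helper below is not from the log), a client can confirm the assignment through the RegionLocator API:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    final class LocateRegionsSketch {
      static void printLocations(Connection conn) throws IOException {
        TableName table = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
        try (RegionLocator locator = conn.getRegionLocator(table)) {
          // For this newly created table there is exactly one region,
          // hosted by the server that executed OpenRegionProcedure above.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " on " + loc.getServerName());
          }
        }
      }
    }
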
2024-11-23T17:05:47,404 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=5b0c07975ee7c61139f3fcd802f8c8d2, regionState=OPEN, openSeqNum=2, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:05:47,409 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5b0c07975ee7c61139f3fcd802f8c8d2, server=1a7c5470b472,37875,1732381534675 because future has completed 2024-11-23T17:05:47,414 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=20, resume processing ppid=19 2024-11-23T17:05:47,415 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=19, state=SUCCESS, hasLock=false; OpenRegionProcedure 5b0c07975ee7c61139f3fcd802f8c8d2, server=1a7c5470b472,37875,1732381534675 in 218 msec 2024-11-23T17:05:47,423 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T17:05:47,423 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381547423"}]},"ts":"1732381547423"} 2024-11-23T17:05:47,424 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=18 2024-11-23T17:05:47,424 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, ppid=18, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=5b0c07975ee7c61139f3fcd802f8c8d2, ASSIGN in 380 msec 2024-11-23T17:05:47,426 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-11-23T17:05:47,427 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T17:05:47,428 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-11-23T17:05:47,435 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37875 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-11-23T17:05:47,439 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:05:47,439 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:05:47,439 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:05:47,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, 
quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:05:47,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-23T17:05:47,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-23T17:05:47,443 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:05:47,443 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:05:47,444 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:05:47,445 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:05:47,445 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:05:47,445 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:05:47,445 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:05:47,446 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:05:47,451 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion in 458 msec 2024-11-23T17:05:47,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 
{}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-23T17:05:47,619 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-11-23T17:05:47,620 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T17:05:47,623 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T17:05:48,975 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportFileSystemStateWithSplitRegion' 2024-11-23T17:05:49,882 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:05:50,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741861_1037 (size=134217728) 2024-11-23T17:05:50,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741861_1037 (size=134217728) 2024-11-23T17:05:50,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741861_1037 (size=134217728) 2024-11-23T17:05:52,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741862_1038 (size=134217728) 2024-11-23T17:05:52,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741862_1038 (size=134217728) 2024-11-23T17:05:52,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741862_1038 (size=134217728) 2024-11-23T17:05:53,234 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/output/cf/test_file is 35, key is 1\x00\x00\x00/cf:q/1732381547629/Put/seqid=0 2024-11-23T17:05:53,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741863_1039 (size=51979256) 2024-11-23T17:05:53,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741863_1039 (size=51979256) 2024-11-23T17:05:53,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741863_1039 (size=51979256) 2024-11-23T17:05:53,648 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@965e327, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:05:53,649 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:05:53,649 DEBUG 
[Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:05:53,654 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:05:53,655 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:05:53,655 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:05:53,656 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7982e6dd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:05:53,656 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:05:53,656 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:05:53,659 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:05:53,661 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56552, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:05:53,662 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@313ae80d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:05:53,663 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:05:53,665 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:05:53,665 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:05:53,670 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51762, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:05:53,688 WARN [Time-limited test {}] tool.BulkLoadHFilesTool$1(330): Trying to bulk load hfile hdfs://localhost:35431/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 
2024-11-23T17:05:53,689 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-23T17:05:53,690 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.AsyncConnectionImpl(321): The fetched master address is 1a7c5470b472,46159,1732381533870 2024-11-23T17:05:53,691 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4c71f3e8 2024-11-23T17:05:53,691 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T17:05:53,693 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56554, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T17:05:53,701 WARN [IPC Server handler 3 on default port 35431 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-11-23T17:05:53,709 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1732381546985.5b0c07975ee7c61139f3fcd802f8c8d2., hostname=1a7c5470b472,37875,1732381534675, seqNum=2] 2024-11-23T17:05:53,725 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-23T17:05:53,750 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:35431/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/output/cf/test_file first=Optional[1\x00\x00\x00] last=Optional[9\x00\x00\x00] 2024-11-23T17:05:53,782 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37875 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-23T17:05:53,787 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37875 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: ExecService size: 101 connection: 172.17.0.2:33427 deadline: 1732381613781, exception=org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 2024-11-23T17:05:53,789 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37875 {}] regionserver.SecureBulkLoadManager(227): unable to add token java.util.concurrent.ExecutionException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:396) ~[?:?] at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073) ~[?:?] at org.apache.hadoop.hbase.regionserver.SecureBulkLoadManager.secureBulkLoadHFiles(SecureBulkLoadManager.java:221) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.bulkLoadHFile(RSRpcServices.java:2347) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43510) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] Caused by: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.RegionCoprocessorRpcChannelImpl.lambda$rpcCall$0(RegionCoprocessorRpcChannelImpl.java:90) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at 
org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T17:05:53,792 WARN [IPC Server handler 1 on default port 35431 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-11-23T17:05:53,845 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37875 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:35431/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/output/cf/test_file for inclusion in 5b0c07975ee7c61139f3fcd802f8c8d2/cf 2024-11-23T17:05:53,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37875 {}] regionserver.HStore(626): HFile bounds: first=1\x00\x00\x00 last=9\x00\x00\x00 2024-11-23T17:05:53,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37875 {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-23T17:05:53,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37875 {}] regionserver.HStore(641): Trying to bulk load hfile hdfs://localhost:35431/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 2024-11-23T17:05:53,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37875 {}] regionserver.HRegion(2603): Flush status journal for 5b0c07975ee7c61139f3fcd802f8c8d2: 2024-11-23T17:05:53,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37875 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(397): Moving hdfs://localhost:35431/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/output/cf/test_file to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/staging/jenkins__testExportFileSystemStateWithSplitRegion__euckkmtt5odsv1n5s4thv8udfal5m4nic8fgiavq9jdaasmf8fjmnsknskquphm8/cf/test_file 2024-11-23T17:05:53,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37875 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/staging/jenkins__testExportFileSystemStateWithSplitRegion__euckkmtt5odsv1n5s4thv8udfal5m4nic8fgiavq9jdaasmf8fjmnsknskquphm8/cf/test_file as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/5b0c07975ee7c61139f3fcd802f8c8d2/cf/22189fffabb8439a86a91175bf46eb55_SeqId_4_ 2024-11-23T17:05:53,876 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37875 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/staging/jenkins__testExportFileSystemStateWithSplitRegion__euckkmtt5odsv1n5s4thv8udfal5m4nic8fgiavq9jdaasmf8fjmnsknskquphm8/cf/test_file into 5b0c07975ee7c61139f3fcd802f8c8d2/cf as 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/5b0c07975ee7c61139f3fcd802f8c8d2/cf/22189fffabb8439a86a91175bf46eb55_SeqId_4_ - updating store file list. 2024-11-23T17:05:53,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37875 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 22189fffabb8439a86a91175bf46eb55_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-23T17:05:53,893 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37875 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/5b0c07975ee7c61139f3fcd802f8c8d2/cf/22189fffabb8439a86a91175bf46eb55_SeqId_4_ into 5b0c07975ee7c61139f3fcd802f8c8d2/cf 2024-11-23T17:05:53,893 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37875 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/staging/jenkins__testExportFileSystemStateWithSplitRegion__euckkmtt5odsv1n5s4thv8udfal5m4nic8fgiavq9jdaasmf8fjmnsknskquphm8/cf/test_file into 5b0c07975ee7c61139f3fcd802f8c8d2/cf (new location: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/5b0c07975ee7c61139f3fcd802f8c8d2/cf/22189fffabb8439a86a91175bf46eb55_SeqId_4_) 2024-11-23T17:05:53,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37875 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/staging/jenkins__testExportFileSystemStateWithSplitRegion__euckkmtt5odsv1n5s4thv8udfal5m4nic8fgiavq9jdaasmf8fjmnsknskquphm8/cf/test_file 2024-11-23T17:05:53,916 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-23T17:05:53,916 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.bulkLoad(BulkLoadHFilesTool.java:1125) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.run(BulkLoadHFilesTool.java:1176) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemStateWithSplitRegion(TestExportSnapshot.java:229) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T17:05:53,916 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:05:53,917 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:05:53,917 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T17:05:53,917 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-23T17:05:53,919 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testExportFileSystemStateWithSplitRegion,,1732381546985.5b0c07975ee7c61139f3fcd802f8c8d2., hostname=1a7c5470b472,37875,1732381534675, seqNum=2 , the old value is region=testExportFileSystemStateWithSplitRegion,,1732381546985.5b0c07975ee7c61139f3fcd802f8c8d2., hostname=1a7c5470b472,37875,1732381534675, seqNum=2, error=org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Call to address=1a7c5470b472:37875 failed on local exception: org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-11-23T17:05:53,920 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testExportFileSystemStateWithSplitRegion,,1732381546985.5b0c07975ee7c61139f3fcd802f8c8d2., hostname=1a7c5470b472,37875,1732381534675, seqNum=2 is org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-11-23T17:05:53,920 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testExportFileSystemStateWithSplitRegion,,1732381546985.5b0c07975ee7c61139f3fcd802f8c8d2., hostname=1a7c5470b472,37875,1732381534675, seqNum=2 from cache 2024-11-23T17:05:53,926 WARN [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] util.NettyFutureUtils(65): IO operation failed org.apache.hbase.thirdparty.io.netty.channel.StacklessClosedChannelException: null at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AbstractUnsafe.write(Object, ChannelPromise)(Unknown Source) ~[hbase-shaded-netty-4.1.9.jar:?] 2024-11-23T17:05:53,953 DEBUG [Time-limited test {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='5', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1732381546985.5b0c07975ee7c61139f3fcd802f8c8d2., hostname=1a7c5470b472,37875,1732381534675, seqNum=2] 2024-11-23T17:05:53,973 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster$3(2313): Client=jenkins//172.17.0.2 split testExportFileSystemStateWithSplitRegion,,1732381546985.5b0c07975ee7c61139f3fcd802f8c8d2. 
2024-11-23T17:05:53,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=1a7c5470b472,37875,1732381534675 2024-11-23T17:05:53,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=5b0c07975ee7c61139f3fcd802f8c8d2, daughterA=e8613a6d83b2cc2b96bf3bd7651c01ee, daughterB=226bdb0a3fd7994f18ba669ae3f7e93a 2024-11-23T17:05:53,992 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=5b0c07975ee7c61139f3fcd802f8c8d2, daughterA=e8613a6d83b2cc2b96bf3bd7651c01ee, daughterB=226bdb0a3fd7994f18ba669ae3f7e93a 2024-11-23T17:05:53,992 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=5b0c07975ee7c61139f3fcd802f8c8d2, daughterA=e8613a6d83b2cc2b96bf3bd7651c01ee, daughterB=226bdb0a3fd7994f18ba669ae3f7e93a 2024-11-23T17:05:53,992 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=5b0c07975ee7c61139f3fcd802f8c8d2, daughterA=e8613a6d83b2cc2b96bf3bd7651c01ee, daughterB=226bdb0a3fd7994f18ba669ae3f7e93a 2024-11-23T17:05:53,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-11-23T17:05:54,000 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=5b0c07975ee7c61139f3fcd802f8c8d2, UNASSIGN}] 2024-11-23T17:05:54,002 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=5b0c07975ee7c61139f3fcd802f8c8d2, UNASSIGN 2024-11-23T17:05:54,004 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=5b0c07975ee7c61139f3fcd802f8c8d2, regionState=CLOSING, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:05:54,008 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=5b0c07975ee7c61139f3fcd802f8c8d2, UNASSIGN because future has completed 2024-11-23T17:05:54,009 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-23T17:05:54,009 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5b0c07975ee7c61139f3fcd802f8c8d2, server=1a7c5470b472,37875,1732381534675}] 2024-11-23T17:05:54,029 WARN 
[Async-Client-Retry-Timer-pool-0 {}] client.AsyncNonMetaRegionLocator(265): Failed to locate region in 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=1a7c5470b472:37875 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.scan(ClientProtos.java:43851) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.callOpenScanner(AsyncClientScanner.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:242) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.timelineConsistentRead(ConnectionUtils.java:442) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:255) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.start(AsyncClientScanner.java:275) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:617) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:91) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.locateInMeta(AsyncNonMetaRegionLocator.java:408) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocationsInternal(AsyncNonMetaRegionLocator.java:516) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocations(AsyncNonMetaRegionLocator.java:529) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.lambda$getRegionLocation$7(AsyncRegionLocator.java:164) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.tracedLocationFuture(AsyncRegionLocator.java:106) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:158) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:193) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:109) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.lambda$tryScheduleRetry$1(AsyncRpcRetryingCaller.java:139) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.run(HashedWheelTimer.java:713) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.ImmediateExecutor.execute(ImmediateExecutor.java:34) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.expire(HashedWheelTimer.java:701) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelBucket.expireTimeouts(HashedWheelTimer.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 34 more 2024-11-23T17:05:54,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-11-23T17:05:54,171 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(122): Close 5b0c07975ee7c61139f3fcd802f8c8d2 2024-11-23T17:05:54,171 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-23T17:05:54,172 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1722): Closing 5b0c07975ee7c61139f3fcd802f8c8d2, disabling compactions & flushes 2024-11-23T17:05:54,172 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1732381546985.5b0c07975ee7c61139f3fcd802f8c8d2. 2024-11-23T17:05:54,172 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1732381546985.5b0c07975ee7c61139f3fcd802f8c8d2. 2024-11-23T17:05:54,172 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1732381546985.5b0c07975ee7c61139f3fcd802f8c8d2. after waiting 0 ms 2024-11-23T17:05:54,173 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1732381546985.5b0c07975ee7c61139f3fcd802f8c8d2. 2024-11-23T17:05:54,202 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/5b0c07975ee7c61139f3fcd802f8c8d2/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=1 2024-11-23T17:05:54,207 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:05:54,207 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1732381546985.5b0c07975ee7c61139f3fcd802f8c8d2. 
2024-11-23T17:05:54,207 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1676): Region close journal for 5b0c07975ee7c61139f3fcd802f8c8d2: Waiting for close lock at 1732381554172Running coprocessor pre-close hooks at 1732381554172Disabling compacts and flushes for region at 1732381554172Disabling writes for close at 1732381554172Writing region close event to WAL at 1732381554189 (+17 ms)Running coprocessor post-close hooks at 1732381554204 (+15 ms)Closed at 1732381554207 (+3 ms) 2024-11-23T17:05:54,211 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(157): Closed 5b0c07975ee7c61139f3fcd802f8c8d2 2024-11-23T17:05:54,212 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=5b0c07975ee7c61139f3fcd802f8c8d2, regionState=CLOSED 2024-11-23T17:05:54,215 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5b0c07975ee7c61139f3fcd802f8c8d2, server=1a7c5470b472,37875,1732381534675 because future has completed 2024-11-23T17:05:54,222 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=23, resume processing ppid=22 2024-11-23T17:05:54,223 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=23, ppid=22, state=SUCCESS, hasLock=false; CloseRegionProcedure 5b0c07975ee7c61139f3fcd802f8c8d2, server=1a7c5470b472,37875,1732381534675 in 209 msec 2024-11-23T17:05:54,226 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=22, resume processing ppid=21 2024-11-23T17:05:54,226 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=22, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=5b0c07975ee7c61139f3fcd802f8c8d2, UNASSIGN in 222 msec 2024-11-23T17:05:54,238 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:05:54,242 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=21 splitting 1 storefiles, region=5b0c07975ee7c61139f3fcd802f8c8d2, threads=1 2024-11-23T17:05:54,244 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=21 splitting started for store file: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/5b0c07975ee7c61139f3fcd802f8c8d2/cf/22189fffabb8439a86a91175bf46eb55_SeqId_4_ for region: 5b0c07975ee7c61139f3fcd802f8c8d2 2024-11-23T17:05:54,253 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 22189fffabb8439a86a91175bf46eb55_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-23T17:05:54,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741864_1040 (size=21) 2024-11-23T17:05:54,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741864_1040 (size=21) 2024-11-23T17:05:54,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741864_1040 (size=21) 
2024-11-23T17:05:54,289 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 22189fffabb8439a86a91175bf46eb55_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-23T17:05:54,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741865_1041 (size=21) 2024-11-23T17:05:54,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741865_1041 (size=21) 2024-11-23T17:05:54,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741865_1041 (size=21) 2024-11-23T17:05:54,303 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=21 splitting complete for store file: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/5b0c07975ee7c61139f3fcd802f8c8d2/cf/22189fffabb8439a86a91175bf46eb55_SeqId_4_ for region: 5b0c07975ee7c61139f3fcd802f8c8d2 2024-11-23T17:05:54,305 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=21 split storefiles for region 5b0c07975ee7c61139f3fcd802f8c8d2 Daughter A: [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/e8613a6d83b2cc2b96bf3bd7651c01ee/cf/22189fffabb8439a86a91175bf46eb55_SeqId_4_.5b0c07975ee7c61139f3fcd802f8c8d2] storefiles, Daughter B: [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/226bdb0a3fd7994f18ba669ae3f7e93a/cf/22189fffabb8439a86a91175bf46eb55_SeqId_4_.5b0c07975ee7c61139f3fcd802f8c8d2] storefiles. 
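For context, the split that pid=21 is executing here is the kind of operation a client requests through the Admin API; the daughters initially hold only the "-bottom"/"-top" reference files listed above rather than rewritten data. A minimal sketch, assuming a standard client Configuration and using the split key '5' visible in the daughter boundaries:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class SplitRegionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
      // Ask the master to split the table at row key "5"; on the master this ends up
      // running a SplitTableRegionProcedure like pid=21 above. The daughters do not
      // rewrite data immediately: they open with "-bottom"/"-top" reference files
      // pointing back at the parent HFile, as the log shows.
      admin.split(table, Bytes.toBytes("5"));
    }
  }
}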
2024-11-23T17:05:54,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-11-23T17:05:54,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741866_1042 (size=76) 2024-11-23T17:05:54,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741866_1042 (size=76) 2024-11-23T17:05:54,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741866_1042 (size=76) 2024-11-23T17:05:54,325 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:05:54,340 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:05:54,340 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-11-23T17:05:54,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741867_1043 (size=76) 2024-11-23T17:05:54,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741867_1043 (size=76) 2024-11-23T17:05:54,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741867_1043 (size=76) 2024-11-23T17:05:54,355 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:05:54,374 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/e8613a6d83b2cc2b96bf3bd7651c01ee/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-11-23T17:05:54,381 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/226bdb0a3fd7994f18ba669ae3f7e93a/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-11-23T17:05:54,386 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1732381546985.5b0c07975ee7c61139f3fcd802f8c8d2.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1732381554385"},{"qualifier":"splitA","vlen":75,"tag":[],"timestamp":"1732381554385"},{"qualifier":"splitB","vlen":75,"tag":[],"timestamp":"1732381554385"}]},"ts":"1732381554385"} 2024-11-23T17:05:54,386 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1732381553983.e8613a6d83b2cc2b96bf3bd7651c01ee.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732381554385"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732381554385"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732381554385"}]},"ts":"1732381554385"} 2024-11-23T17:05:54,386 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,5,1732381553983.226bdb0a3fd7994f18ba669ae3f7e93a.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732381554385"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732381554385"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732381554385"}]},"ts":"1732381554385"} 2024-11-23T17:05:54,406 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=e8613a6d83b2cc2b96bf3bd7651c01ee, ASSIGN}, {pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=226bdb0a3fd7994f18ba669ae3f7e93a, ASSIGN}] 2024-11-23T17:05:54,408 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=e8613a6d83b2cc2b96bf3bd7651c01ee, ASSIGN 2024-11-23T17:05:54,408 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=226bdb0a3fd7994f18ba669ae3f7e93a, ASSIGN 2024-11-23T17:05:54,410 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=e8613a6d83b2cc2b96bf3bd7651c01ee, ASSIGN; state=SPLITTING_NEW, location=1a7c5470b472,37875,1732381534675; forceNewPlan=false, retain=false 2024-11-23T17:05:54,410 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=226bdb0a3fd7994f18ba669ae3f7e93a, ASSIGN; state=SPLITTING_NEW, location=1a7c5470b472,37875,1732381534675; forceNewPlan=false, retain=false 2024-11-23T17:05:54,561 INFO [1a7c5470b472:46159 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-23T17:05:54,561 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=e8613a6d83b2cc2b96bf3bd7651c01ee, regionState=OPENING, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:05:54,561 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=226bdb0a3fd7994f18ba669ae3f7e93a, regionState=OPENING, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:05:54,566 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=e8613a6d83b2cc2b96bf3bd7651c01ee, ASSIGN because future has completed 2024-11-23T17:05:54,566 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=26, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure e8613a6d83b2cc2b96bf3bd7651c01ee, server=1a7c5470b472,37875,1732381534675}] 2024-11-23T17:05:54,568 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=226bdb0a3fd7994f18ba669ae3f7e93a, ASSIGN because future has completed 2024-11-23T17:05:54,569 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=27, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure 226bdb0a3fd7994f18ba669ae3f7e93a, server=1a7c5470b472,37875,1732381534675}] 2024-11-23T17:05:54,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-11-23T17:05:54,726 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1732381553983.e8613a6d83b2cc2b96bf3bd7651c01ee. 2024-11-23T17:05:54,726 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7752): Opening region: {ENCODED => e8613a6d83b2cc2b96bf3bd7651c01ee, NAME => 'testExportFileSystemStateWithSplitRegion,,1732381553983.e8613a6d83b2cc2b96bf3bd7651c01ee.', STARTKEY => '', ENDKEY => '5'} 2024-11-23T17:05:54,726 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1732381553983.e8613a6d83b2cc2b96bf3bd7651c01ee. service=AccessControlService 2024-11-23T17:05:54,727 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T17:05:54,727 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion e8613a6d83b2cc2b96bf3bd7651c01ee 2024-11-23T17:05:54,727 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1732381553983.e8613a6d83b2cc2b96bf3bd7651c01ee.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:05:54,727 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7794): checking encryption for e8613a6d83b2cc2b96bf3bd7651c01ee 2024-11-23T17:05:54,727 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7797): checking classloading for e8613a6d83b2cc2b96bf3bd7651c01ee 2024-11-23T17:05:54,729 INFO [StoreOpener-e8613a6d83b2cc2b96bf3bd7651c01ee-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e8613a6d83b2cc2b96bf3bd7651c01ee 2024-11-23T17:05:54,730 INFO [StoreOpener-e8613a6d83b2cc2b96bf3bd7651c01ee-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e8613a6d83b2cc2b96bf3bd7651c01ee columnFamilyName cf 2024-11-23T17:05:54,731 DEBUG [StoreOpener-e8613a6d83b2cc2b96bf3bd7651c01ee-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:05:54,742 DEBUG [StoreFileOpener-e8613a6d83b2cc2b96bf3bd7651c01ee-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 22189fffabb8439a86a91175bf46eb55_SeqId_4_.5b0c07975ee7c61139f3fcd802f8c8d2: NONE, but ROW specified in column family configuration 2024-11-23T17:05:54,761 DEBUG [StoreOpener-e8613a6d83b2cc2b96bf3bd7651c01ee-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/e8613a6d83b2cc2b96bf3bd7651c01ee/cf/22189fffabb8439a86a91175bf46eb55_SeqId_4_.5b0c07975ee7c61139f3fcd802f8c8d2->hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/5b0c07975ee7c61139f3fcd802f8c8d2/cf/22189fffabb8439a86a91175bf46eb55_SeqId_4_-bottom 2024-11-23T17:05:54,762 INFO [StoreOpener-e8613a6d83b2cc2b96bf3bd7651c01ee-1 {}] regionserver.HStore(327): Store=e8613a6d83b2cc2b96bf3bd7651c01ee/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:05:54,763 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1038): replaying wal for e8613a6d83b2cc2b96bf3bd7651c01ee 2024-11-23T17:05:54,764 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/e8613a6d83b2cc2b96bf3bd7651c01ee 2024-11-23T17:05:54,765 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/e8613a6d83b2cc2b96bf3bd7651c01ee 2024-11-23T17:05:54,766 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1048): stopping wal replay for e8613a6d83b2cc2b96bf3bd7651c01ee 2024-11-23T17:05:54,766 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1060): Cleaning up temporary data for e8613a6d83b2cc2b96bf3bd7651c01ee 2024-11-23T17:05:54,769 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1093): writing seq id for e8613a6d83b2cc2b96bf3bd7651c01ee 2024-11-23T17:05:54,770 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1114): Opened e8613a6d83b2cc2b96bf3bd7651c01ee; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71508013, jitterRate=0.06555242836475372}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:05:54,771 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e8613a6d83b2cc2b96bf3bd7651c01ee 2024-11-23T17:05:54,772 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1006): Region open journal for e8613a6d83b2cc2b96bf3bd7651c01ee: Running coprocessor pre-open hook at 1732381554727Writing region info on filesystem at 1732381554727Initializing all the Stores at 1732381554729 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381554729Cleaning up temporary data from old regions at 1732381554766 (+37 ms)Running coprocessor post-open hooks at 1732381554771 (+5 ms)Region opened successfully at 1732381554772 (+1 ms) 2024-11-23T17:05:54,773 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1732381553983.e8613a6d83b2cc2b96bf3bd7651c01ee., pid=26, masterSystemTime=1732381554721 2024-11-23T17:05:54,774 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] 
regionserver.CompactSplit(342): Ignoring compaction request for testExportFileSystemStateWithSplitRegion,,1732381553983.e8613a6d83b2cc2b96bf3bd7651c01ee.,because compaction is disabled. 2024-11-23T17:05:54,776 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1732381553983.e8613a6d83b2cc2b96bf3bd7651c01ee. 2024-11-23T17:05:54,777 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1732381553983.e8613a6d83b2cc2b96bf3bd7651c01ee. 2024-11-23T17:05:54,777 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,5,1732381553983.226bdb0a3fd7994f18ba669ae3f7e93a. 2024-11-23T17:05:54,777 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7752): Opening region: {ENCODED => 226bdb0a3fd7994f18ba669ae3f7e93a, NAME => 'testExportFileSystemStateWithSplitRegion,5,1732381553983.226bdb0a3fd7994f18ba669ae3f7e93a.', STARTKEY => '5', ENDKEY => ''} 2024-11-23T17:05:54,777 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,5,1732381553983.226bdb0a3fd7994f18ba669ae3f7e93a. service=AccessControlService 2024-11-23T17:05:54,777 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=e8613a6d83b2cc2b96bf3bd7651c01ee, regionState=OPEN, openSeqNum=7, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:05:54,778 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
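Once both daughter ASSIGN procedures finish, clients see the new region boundaries. As an illustrative check (not part of the test output), a RegionLocator can be used to confirm the table now has two regions split at '5':

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class DaughterRegionsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         RegionLocator locator = connection.getRegionLocator(
             TableName.valueOf("testExportFileSystemStateWithSplitRegion"))) {
      // After the split, this should print two regions: ['', '5') and ['5', '').
      for (HRegionLocation location : locator.getAllRegionLocations()) {
        System.out.println(location.getRegion().getEncodedName()
            + " start=" + Bytes.toStringBinary(location.getRegion().getStartKey())
            + " end=" + Bytes.toStringBinary(location.getRegion().getEndKey())
            + " on " + location.getServerName());
      }
    }
  }
}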
2024-11-23T17:05:54,778 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 226bdb0a3fd7994f18ba669ae3f7e93a 2024-11-23T17:05:54,778 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,5,1732381553983.226bdb0a3fd7994f18ba669ae3f7e93a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:05:54,778 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7794): checking encryption for 226bdb0a3fd7994f18ba669ae3f7e93a 2024-11-23T17:05:54,778 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7797): checking classloading for 226bdb0a3fd7994f18ba669ae3f7e93a 2024-11-23T17:05:54,781 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=26, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure e8613a6d83b2cc2b96bf3bd7651c01ee, server=1a7c5470b472,37875,1732381534675 because future has completed 2024-11-23T17:05:54,781 INFO [StoreOpener-226bdb0a3fd7994f18ba669ae3f7e93a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 226bdb0a3fd7994f18ba669ae3f7e93a 2024-11-23T17:05:54,783 INFO [StoreOpener-226bdb0a3fd7994f18ba669ae3f7e93a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 226bdb0a3fd7994f18ba669ae3f7e93a columnFamilyName cf 2024-11-23T17:05:54,783 DEBUG [StoreOpener-226bdb0a3fd7994f18ba669ae3f7e93a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:05:54,786 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=26, resume processing ppid=24 2024-11-23T17:05:54,786 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=26, ppid=24, state=SUCCESS, hasLock=false; OpenRegionProcedure e8613a6d83b2cc2b96bf3bd7651c01ee, server=1a7c5470b472,37875,1732381534675 in 216 msec 2024-11-23T17:05:54,789 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=24, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=e8613a6d83b2cc2b96bf3bd7651c01ee, ASSIGN in 380 msec 2024-11-23T17:05:54,796 DEBUG [StoreFileOpener-226bdb0a3fd7994f18ba669ae3f7e93a-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom 
filter type for 22189fffabb8439a86a91175bf46eb55_SeqId_4_.5b0c07975ee7c61139f3fcd802f8c8d2: NONE, but ROW specified in column family configuration 2024-11-23T17:05:54,798 DEBUG [StoreOpener-226bdb0a3fd7994f18ba669ae3f7e93a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/226bdb0a3fd7994f18ba669ae3f7e93a/cf/22189fffabb8439a86a91175bf46eb55_SeqId_4_.5b0c07975ee7c61139f3fcd802f8c8d2->hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/5b0c07975ee7c61139f3fcd802f8c8d2/cf/22189fffabb8439a86a91175bf46eb55_SeqId_4_-top 2024-11-23T17:05:54,799 INFO [StoreOpener-226bdb0a3fd7994f18ba669ae3f7e93a-1 {}] regionserver.HStore(327): Store=226bdb0a3fd7994f18ba669ae3f7e93a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:05:54,799 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1038): replaying wal for 226bdb0a3fd7994f18ba669ae3f7e93a 2024-11-23T17:05:54,800 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/226bdb0a3fd7994f18ba669ae3f7e93a 2024-11-23T17:05:54,801 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/226bdb0a3fd7994f18ba669ae3f7e93a 2024-11-23T17:05:54,802 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1048): stopping wal replay for 226bdb0a3fd7994f18ba669ae3f7e93a 2024-11-23T17:05:54,802 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1060): Cleaning up temporary data for 226bdb0a3fd7994f18ba669ae3f7e93a 2024-11-23T17:05:54,805 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1093): writing seq id for 226bdb0a3fd7994f18ba669ae3f7e93a 2024-11-23T17:05:54,806 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1114): Opened 226bdb0a3fd7994f18ba669ae3f7e93a; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73632425, jitterRate=0.09720863401889801}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:05:54,806 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 226bdb0a3fd7994f18ba669ae3f7e93a 2024-11-23T17:05:54,807 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1006): Region open journal for 226bdb0a3fd7994f18ba669ae3f7e93a: Running coprocessor pre-open hook at 1732381554778Writing region info on filesystem at 1732381554778Initializing all the Stores at 1732381554780 (+2 ms)Instantiating store for column 
family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381554780Cleaning up temporary data from old regions at 1732381554802 (+22 ms)Running coprocessor post-open hooks at 1732381554806 (+4 ms)Region opened successfully at 1732381554806 2024-11-23T17:05:54,808 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,5,1732381553983.226bdb0a3fd7994f18ba669ae3f7e93a., pid=27, masterSystemTime=1732381554721 2024-11-23T17:05:54,808 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.CompactSplit(342): Ignoring compaction request for testExportFileSystemStateWithSplitRegion,5,1732381553983.226bdb0a3fd7994f18ba669ae3f7e93a.,because compaction is disabled. 2024-11-23T17:05:54,811 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,5,1732381553983.226bdb0a3fd7994f18ba669ae3f7e93a. 2024-11-23T17:05:54,811 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,5,1732381553983.226bdb0a3fd7994f18ba669ae3f7e93a. 2024-11-23T17:05:54,812 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=226bdb0a3fd7994f18ba669ae3f7e93a, regionState=OPEN, openSeqNum=7, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:05:54,817 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=27, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure 226bdb0a3fd7994f18ba669ae3f7e93a, server=1a7c5470b472,37875,1732381534675 because future has completed 2024-11-23T17:05:54,821 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=27, resume processing ppid=25 2024-11-23T17:05:54,822 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=27, ppid=25, state=SUCCESS, hasLock=false; OpenRegionProcedure 226bdb0a3fd7994f18ba669ae3f7e93a, server=1a7c5470b472,37875,1732381534675 in 249 msec 2024-11-23T17:05:54,825 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=25, resume processing ppid=21 2024-11-23T17:05:54,825 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=25, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=226bdb0a3fd7994f18ba669ae3f7e93a, ASSIGN in 416 msec 2024-11-23T17:05:54,827 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=5b0c07975ee7c61139f3fcd802f8c8d2, daughterA=e8613a6d83b2cc2b96bf3bd7651c01ee, daughterB=226bdb0a3fd7994f18ba669ae3f7e93a in 840 msec 2024-11-23T17:05:55,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-11-23T17:05:55,139 DEBUG [Time-limited test {}] 
hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-23T17:05:55,139 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SPLIT_REGION, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-11-23T17:05:55,144 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-11-23T17:05:55,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732381555144 (current time:1732381555144). 2024-11-23T17:05:55,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:05:55,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-11-23T17:05:55,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:05:55,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7962c329, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:05:55,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:05:55,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:05:55,146 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:05:55,147 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:05:55,147 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:05:55,147 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@552c2748, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:05:55,147 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:05:55,147 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 
2024-11-23T17:05:55,147 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:05:55,148 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56572, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:05:55,149 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@407c30f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:05:55,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:05:55,150 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:05:55,150 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:05:55,151 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51770, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:05:55,153 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159. 2024-11-23T17:05:55,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:05:55,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:05:55,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:05:55,153 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
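The snapshot request logged at 17:05:55,144 ({ ss=snapshot-testExportFileSystemStateWithSplitRegion ... type=FLUSH ttl=0 }) corresponds to a client call along these lines; SnapshotDescription and SnapshotType.FLUSH are standard client classes, and this is only a sketch of the request, not the harness's exact code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // A FLUSH-type snapshot, matching the descriptor logged by MasterRpcServices:
      // { ss=snapshot-testExportFileSystemStateWithSplitRegion ... type=FLUSH ttl=0 }
      admin.snapshot(new SnapshotDescription(
          "snapshot-testExportFileSystemStateWithSplitRegion",
          TableName.valueOf("testExportFileSystemStateWithSplitRegion"),
          SnapshotType.FLUSH));
      // The call blocks until the master-side SnapshotProcedure (pid=28 below) completes.
    }
  }
}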
2024-11-23T17:05:55,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22b43a85, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:05:55,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:05:55,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:05:55,155 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:05:55,155 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:05:55,155 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:05:55,156 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@705ff2bc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:05:55,156 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:05:55,156 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:05:55,157 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:05:55,157 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56596, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:05:55,158 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40a5c255, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:05:55,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:05:55,160 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:05:55,160 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:05:55,161 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51776, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-23T17:05:55,163 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce., hostname=1a7c5470b472,37875,1732381534675, seqNum=2] 2024-11-23T17:05:55,165 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159. 2024-11-23T17:05:55,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:05:55,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:05:55,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:05:55,166 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:05:55,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-11-23T17:05:55,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T17:05:55,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-11-23T17:05:55,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-11-23T17:05:55,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-11-23T17:05:55,170 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:05:55,172 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:05:55,175 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:05:55,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741868_1044 (size=197) 2024-11-23T17:05:55,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741868_1044 (size=197) 2024-11-23T17:05:55,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741868_1044 (size=197) 2024-11-23T17:05:55,190 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:05:55,190 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
e8613a6d83b2cc2b96bf3bd7651c01ee}, {pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 226bdb0a3fd7994f18ba669ae3f7e93a}] 2024-11-23T17:05:55,191 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 226bdb0a3fd7994f18ba669ae3f7e93a 2024-11-23T17:05:55,191 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e8613a6d83b2cc2b96bf3bd7651c01ee 2024-11-23T17:05:55,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-11-23T17:05:55,344 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37875 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=29 2024-11-23T17:05:55,345 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37875 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=30 2024-11-23T17:05:55,345 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,,1732381553983.e8613a6d83b2cc2b96bf3bd7651c01ee. 2024-11-23T17:05:55,345 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,5,1732381553983.226bdb0a3fd7994f18ba669ae3f7e93a. 2024-11-23T17:05:55,345 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.HRegion(2603): Flush status journal for 226bdb0a3fd7994f18ba669ae3f7e93a: 2024-11-23T17:05:55,345 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.HRegion(2603): Flush status journal for e8613a6d83b2cc2b96bf3bd7651c01ee: 2024-11-23T17:05:55,345 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,,1732381553983.e8613a6d83b2cc2b96bf3bd7651c01ee. for snapshot-testExportFileSystemStateWithSplitRegion completed. 2024-11-23T17:05:55,345 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,5,1732381553983.226bdb0a3fd7994f18ba669ae3f7e93a. for snapshot-testExportFileSystemStateWithSplitRegion completed. 2024-11-23T17:05:55,346 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,5,1732381553983.226bdb0a3fd7994f18ba669ae3f7e93a.' region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T17:05:55,346 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,,1732381553983.e8613a6d83b2cc2b96bf3bd7651c01ee.' 
region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T17:05:55,346 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:05:55,346 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:05:55,346 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/226bdb0a3fd7994f18ba669ae3f7e93a/cf/22189fffabb8439a86a91175bf46eb55_SeqId_4_.5b0c07975ee7c61139f3fcd802f8c8d2->hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/5b0c07975ee7c61139f3fcd802f8c8d2/cf/22189fffabb8439a86a91175bf46eb55_SeqId_4_-top] hfiles 2024-11-23T17:05:55,346 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/e8613a6d83b2cc2b96bf3bd7651c01ee/cf/22189fffabb8439a86a91175bf46eb55_SeqId_4_.5b0c07975ee7c61139f3fcd802f8c8d2->hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/5b0c07975ee7c61139f3fcd802f8c8d2/cf/22189fffabb8439a86a91175bf46eb55_SeqId_4_-bottom] hfiles 2024-11-23T17:05:55,346 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/226bdb0a3fd7994f18ba669ae3f7e93a/cf/22189fffabb8439a86a91175bf46eb55_SeqId_4_.5b0c07975ee7c61139f3fcd802f8c8d2 for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T17:05:55,346 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/e8613a6d83b2cc2b96bf3bd7651c01ee/cf/22189fffabb8439a86a91175bf46eb55_SeqId_4_.5b0c07975ee7c61139f3fcd802f8c8d2 for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T17:05:55,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741869_1045 (size=182) 2024-11-23T17:05:55,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741869_1045 (size=182) 2024-11-23T17:05:55,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741869_1045 (size=182) 2024-11-23T17:05:55,359 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on 
testExportFileSystemStateWithSplitRegion,,1732381553983.e8613a6d83b2cc2b96bf3bd7651c01ee. 2024-11-23T17:05:55,359 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-11-23T17:05:55,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=29 2024-11-23T17:05:55,360 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region e8613a6d83b2cc2b96bf3bd7651c01ee 2024-11-23T17:05:55,360 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e8613a6d83b2cc2b96bf3bd7651c01ee 2024-11-23T17:05:55,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741870_1046 (size=182) 2024-11-23T17:05:55,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741870_1046 (size=182) 2024-11-23T17:05:55,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741870_1046 (size=182) 2024-11-23T17:05:55,366 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=29, ppid=28, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e8613a6d83b2cc2b96bf3bd7651c01ee in 172 msec 2024-11-23T17:05:55,369 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportFileSystemStateWithSplitRegion,5,1732381553983.226bdb0a3fd7994f18ba669ae3f7e93a. 
2024-11-23T17:05:55,369 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=30 2024-11-23T17:05:55,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=30 2024-11-23T17:05:55,370 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region 226bdb0a3fd7994f18ba669ae3f7e93a 2024-11-23T17:05:55,370 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 226bdb0a3fd7994f18ba669ae3f7e93a 2024-11-23T17:05:55,374 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=30, resume processing ppid=28 2024-11-23T17:05:55,374 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=30, ppid=28, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 226bdb0a3fd7994f18ba669ae3f7e93a in 181 msec 2024-11-23T17:05:55,374 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:05:55,375 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-11-23T17:05:55,375 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-23T17:05:55,375 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:05:55,377 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/5b0c07975ee7c61139f3fcd802f8c8d2/cf/22189fffabb8439a86a91175bf46eb55_SeqId_4_] hfiles 2024-11-23T17:05:55,377 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/1): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/5b0c07975ee7c61139f3fcd802f8c8d2/cf/22189fffabb8439a86a91175bf46eb55_SeqId_4_ 2024-11-23T17:05:55,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741871_1047 (size=129) 2024-11-23T17:05:55,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741871_1047 (size=129) 2024-11-23T17:05:55,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741871_1047 (size=129) 2024-11-23T17:05:55,389 INFO [SplitRegionsSnapshotPool-pool-0 {}] procedure.SnapshotProcedure$1(378): take snapshot region={ENCODED => 5b0c07975ee7c61139f3fcd802f8c8d2, NAME => 'testExportFileSystemStateWithSplitRegion,,1732381546985.5b0c07975ee7c61139f3fcd802f8c8d2.', STARTKEY => '', ENDKEY => 
'', OFFLINE => true, SPLIT => true}, table=testExportFileSystemStateWithSplitRegion 2024-11-23T17:05:55,390 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:05:55,391 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:05:55,391 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T17:05:55,392 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T17:05:55,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741872_1048 (size=891) 2024-11-23T17:05:55,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741872_1048 (size=891) 2024-11-23T17:05:55,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741872_1048 (size=891) 2024-11-23T17:05:55,411 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:05:55,419 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:05:55,419 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T17:05:55,421 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:05:55,422 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion 
table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-11-23T17:05:55,424 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=28, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 255 msec 2024-11-23T17:05:55,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-11-23T17:05:55,488 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-11-23T17:05:55,489 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381555489 2024-11-23T17:05:55,489 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:35431, tgtDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381555489, rawTgtDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381555489, srcFsUri=hdfs://localhost:35431, srcDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:05:55,537 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:35431, inputRoot=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:05:55,537 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1293158403_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381555489, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381555489/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T17:05:55,542 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 
2024-11-23T17:05:55,556 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381555489/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T17:05:55,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741873_1049 (size=197) 2024-11-23T17:05:55,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741873_1049 (size=197) 2024-11-23T17:05:55,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741873_1049 (size=197) 2024-11-23T17:05:55,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741874_1050 (size=891) 2024-11-23T17:05:55,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741874_1050 (size=891) 2024-11-23T17:05:55,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741874_1050 (size=891) 2024-11-23T17:05:55,592 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:05:55,592 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:05:55,592 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:05:56,663 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop-12783345311949515604.jar 2024-11-23T17:05:56,664 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:05:56,665 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:05:56,734 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop-6154498523282785552.jar 2024-11-23T17:05:56,734 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:05:56,735 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:05:56,735 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:05:56,735 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:05:56,736 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:05:56,736 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:05:56,736 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-23T17:05:56,737 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-23T17:05:56,737 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-23T17:05:56,737 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-23T17:05:56,738 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-23T17:05:56,738 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-23T17:05:56,738 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-23T17:05:56,739 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-23T17:05:56,739 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-23T17:05:56,739 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-23T17:05:56,740 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-23T17:05:56,742 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:05:56,742 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:05:56,743 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:05:56,743 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:05:56,743 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:05:56,743 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:05:56,744 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:05:56,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741875_1051 (size=24020) 2024-11-23T17:05:56,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741875_1051 (size=24020) 2024-11-23T17:05:56,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741875_1051 (size=24020) 2024-11-23T17:05:56,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741876_1052 (size=77755) 2024-11-23T17:05:56,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741876_1052 (size=77755) 2024-11-23T17:05:56,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741876_1052 (size=77755) 2024-11-23T17:05:57,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741877_1053 (size=131360) 2024-11-23T17:05:57,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741877_1053 (size=131360) 2024-11-23T17:05:57,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741877_1053 (size=131360) 2024-11-23T17:05:57,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741878_1054 (size=111793) 2024-11-23T17:05:57,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741878_1054 (size=111793) 2024-11-23T17:05:57,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741878_1054 (size=111793) 2024-11-23T17:05:57,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741879_1055 (size=1832290) 2024-11-23T17:05:57,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741879_1055 (size=1832290) 2024-11-23T17:05:57,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741879_1055 (size=1832290) 2024-11-23T17:05:57,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741880_1056 (size=6424742) 2024-11-23T17:05:57,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741880_1056 (size=6424742) 
2024-11-23T17:05:57,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741880_1056 (size=6424742) 2024-11-23T17:05:57,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741881_1057 (size=8360005) 2024-11-23T17:05:57,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741881_1057 (size=8360005) 2024-11-23T17:05:57,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741881_1057 (size=8360005) 2024-11-23T17:05:57,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741882_1058 (size=503880) 2024-11-23T17:05:57,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741882_1058 (size=503880) 2024-11-23T17:05:57,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741882_1058 (size=503880) 2024-11-23T17:05:57,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741883_1059 (size=322274) 2024-11-23T17:05:57,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741883_1059 (size=322274) 2024-11-23T17:05:57,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741883_1059 (size=322274) 2024-11-23T17:05:57,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741884_1060 (size=20406) 2024-11-23T17:05:57,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741884_1060 (size=20406) 2024-11-23T17:05:57,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741884_1060 (size=20406) 2024-11-23T17:05:57,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741885_1061 (size=45609) 2024-11-23T17:05:57,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741885_1061 (size=45609) 2024-11-23T17:05:57,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741885_1061 (size=45609) 2024-11-23T17:05:57,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741886_1062 (size=136454) 2024-11-23T17:05:57,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741886_1062 (size=136454) 2024-11-23T17:05:57,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741886_1062 (size=136454) 2024-11-23T17:05:57,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741887_1063 
(size=1597270) 2024-11-23T17:05:57,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741887_1063 (size=1597270) 2024-11-23T17:05:57,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741887_1063 (size=1597270) 2024-11-23T17:05:57,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741888_1064 (size=440956) 2024-11-23T17:05:57,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741888_1064 (size=440956) 2024-11-23T17:05:57,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741888_1064 (size=440956) 2024-11-23T17:05:57,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741889_1065 (size=30873) 2024-11-23T17:05:57,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741889_1065 (size=30873) 2024-11-23T17:05:57,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741889_1065 (size=30873) 2024-11-23T17:05:57,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741890_1066 (size=29229) 2024-11-23T17:05:57,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741890_1066 (size=29229) 2024-11-23T17:05:57,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741890_1066 (size=29229) 2024-11-23T17:05:57,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741891_1067 (size=903661) 2024-11-23T17:05:57,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741891_1067 (size=903661) 2024-11-23T17:05:57,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741891_1067 (size=903661) 2024-11-23T17:05:58,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741892_1068 (size=5175431) 2024-11-23T17:05:58,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741892_1068 (size=5175431) 2024-11-23T17:05:58,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741892_1068 (size=5175431) 2024-11-23T17:05:58,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741893_1069 (size=232881) 2024-11-23T17:05:58,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741893_1069 (size=232881) 2024-11-23T17:05:58,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to 
blk_1073741893_1069 (size=232881) 2024-11-23T17:05:58,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741894_1070 (size=1323991) 2024-11-23T17:05:58,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741894_1070 (size=1323991) 2024-11-23T17:05:58,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741894_1070 (size=1323991) 2024-11-23T17:05:58,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741895_1071 (size=4695811) 2024-11-23T17:05:58,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741895_1071 (size=4695811) 2024-11-23T17:05:58,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741895_1071 (size=4695811) 2024-11-23T17:05:58,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741896_1072 (size=1877034) 2024-11-23T17:05:58,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741896_1072 (size=1877034) 2024-11-23T17:05:58,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741896_1072 (size=1877034) 2024-11-23T17:05:58,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741897_1073 (size=217555) 2024-11-23T17:05:58,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741897_1073 (size=217555) 2024-11-23T17:05:58,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741897_1073 (size=217555) 2024-11-23T17:05:58,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741898_1074 (size=4188619) 2024-11-23T17:05:58,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741898_1074 (size=4188619) 2024-11-23T17:05:58,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741898_1074 (size=4188619) 2024-11-23T17:05:58,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741899_1075 (size=127628) 2024-11-23T17:05:58,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741899_1075 (size=127628) 2024-11-23T17:05:58,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741899_1075 (size=127628) 2024-11-23T17:05:58,393 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-11-23T17:05:58,399 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snapshot-testExportFileSystemStateWithSplitRegion' hfile list 2024-11-23T17:05:58,406 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=5b0c07975ee7c61139f3fcd802f8c8d2-22189fffabb8439a86a91175bf46eb55_SeqId_4_. 2024-11-23T17:05:58,406 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=5b0c07975ee7c61139f3fcd802f8c8d2-22189fffabb8439a86a91175bf46eb55_SeqId_4_. 2024-11-23T17:05:58,407 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=305.6 M 2024-11-23T17:05:58,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741900_1076 (size=244) 2024-11-23T17:05:58,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741900_1076 (size=244) 2024-11-23T17:05:58,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741900_1076 (size=244) 2024-11-23T17:05:58,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741901_1077 (size=17) 2024-11-23T17:05:58,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741901_1077 (size=17) 2024-11-23T17:05:58,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741901_1077 (size=17) 2024-11-23T17:05:58,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741902_1078 (size=304133) 2024-11-23T17:05:58,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741902_1078 (size=304133) 2024-11-23T17:05:58,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741902_1078 (size=304133) 2024-11-23T17:05:59,062 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T17:05:59,062 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T17:05:59,309 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0001_000001 (auth:SIMPLE) from 127.0.0.1:48448 2024-11-23T17:05:59,710 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:06:03,176 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-23T17:06:04,340 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 2024-11-23T17:06:04,340 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-11-23T17:06:07,397 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0001_000001 (auth:SIMPLE) from 127.0.0.1:47722 2024-11-23T17:06:07,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741903_1079 (size=349831) 2024-11-23T17:06:07,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741903_1079 (size=349831) 2024-11-23T17:06:07,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741903_1079 (size=349831) 2024-11-23T17:06:09,702 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0001_000001 (auth:SIMPLE) from 127.0.0.1:49768 2024-11-23T17:06:18,898 INFO [master/1a7c5470b472:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-23T17:06:18,898 INFO [master/1a7c5470b472:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-23T17:06:29,378 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 7c56c74f89526c98c0086020fd89c555, had cached 0 bytes from a total of 15937 2024-11-23T17:06:29,406 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 42e2b13e79d70de0ccaa034b156e7d6e, had cached 0 bytes from a total of 5754 2024-11-23T17:06:33,176 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-23T17:06:38,033 DEBUG [master/1a7c5470b472:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 64dcd7fd9fc755d9ffeb5ce90b3151ce changed from -1.0 to 0.0, refreshing cache 2024-11-23T17:06:38,034 DEBUG [master/1a7c5470b472:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 7c56c74f89526c98c0086020fd89c555 changed from -1.0 to 0.0, refreshing cache 2024-11-23T17:06:38,035 DEBUG [master/1a7c5470b472:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 42e2b13e79d70de0ccaa034b156e7d6e changed from -1.0 to 0.0, refreshing cache 2024-11-23T17:06:39,727 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region e8613a6d83b2cc2b96bf3bd7651c01ee, had cached 0 bytes from a total of 320414712 2024-11-23T17:06:39,778 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 226bdb0a3fd7994f18ba669ae3f7e93a, had cached 0 bytes from a total of 320414712 2024-11-23T17:06:52,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741904_1080 (size=134217728) 2024-11-23T17:06:52,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741904_1080 (size=134217728) 2024-11-23T17:06:52,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741904_1080 (size=134217728) 2024-11-23T17:07:03,177 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T17:07:14,378 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 7c56c74f89526c98c0086020fd89c555, had cached 0 bytes from a total of 15937 2024-11-23T17:07:14,406 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 42e2b13e79d70de0ccaa034b156e7d6e, had cached 0 bytes from a total of 5754 2024-11-23T17:07:24,728 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region e8613a6d83b2cc2b96bf3bd7651c01ee, had cached 0 bytes from a total of 320414712 2024-11-23T17:07:24,778 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 226bdb0a3fd7994f18ba669ae3f7e93a, had cached 0 bytes from a total of 320414712 2024-11-23T17:07:29,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741905_1081 (size=134217728) 2024-11-23T17:07:29,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741905_1081 (size=134217728) 2024-11-23T17:07:29,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741905_1081 (size=134217728) 2024-11-23T17:07:33,177 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-23T17:07:36,657 WARN [regionserver/1a7c5470b472:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 7, running: 1 2024-11-23T17:07:43,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741906_1082 (size=51979256) 2024-11-23T17:07:43,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741906_1082 (size=51979256) 2024-11-23T17:07:43,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741906_1082 (size=51979256) 2024-11-23T17:07:43,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741907_1083 (size=17520) 2024-11-23T17:07:43,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741907_1083 (size=17520) 2024-11-23T17:07:43,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741907_1083 (size=17520) 2024-11-23T17:07:43,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741908_1084 (size=482) 2024-11-23T17:07:43,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741908_1084 (size=482) 2024-11-23T17:07:43,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741908_1084 (size=482) 2024-11-23T17:07:43,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741909_1085 (size=17520) 2024-11-23T17:07:43,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741909_1085 (size=17520) 2024-11-23T17:07:43,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741909_1085 (size=17520) 2024-11-23T17:07:43,682 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_3/usercache/jenkins/appcache/application_1732381541272_0001/container_1732381541272_0001_01_000002/launch_container.sh] 2024-11-23T17:07:43,682 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_3/usercache/jenkins/appcache/application_1732381541272_0001/container_1732381541272_0001_01_000002/container_tokens] 2024-11-23T17:07:43,682 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_3/usercache/jenkins/appcache/application_1732381541272_0001/container_1732381541272_0001_01_000002/sysfs] 2024-11-23T17:07:43,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741910_1086 (size=349831) 2024-11-23T17:07:43,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741910_1086 (size=349831) 2024-11-23T17:07:43,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741910_1086 (size=349831) 2024-11-23T17:07:43,708 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0001_000001 (auth:SIMPLE) from 127.0.0.1:55686 2024-11-23T17:07:45,648 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-23T17:07:45,650 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-11-23T17:07:45,659 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:45,659 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-23T17:07:45,660 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-23T17:07:45,660 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1293158403_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:45,661 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo 2024-11-23T17:07:45,661 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest 2024-11-23T17:07:45,661 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1293158403_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381555489/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381555489/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:45,662 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381555489/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo 2024-11-23T17:07:45,662 DEBUG [Time-limited test {}] 
snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381555489/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest 2024-11-23T17:07:45,675 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:45,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=31, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:45,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-11-23T17:07:45,684 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381665684"}]},"ts":"1732381665684"} 2024-11-23T17:07:45,687 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 2024-11-23T17:07:45,687 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-11-23T17:07:45,689 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=32, ppid=31, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion}] 2024-11-23T17:07:45,694 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=e8613a6d83b2cc2b96bf3bd7651c01ee, UNASSIGN}, {pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=226bdb0a3fd7994f18ba669ae3f7e93a, UNASSIGN}] 2024-11-23T17:07:45,696 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=226bdb0a3fd7994f18ba669ae3f7e93a, UNASSIGN 2024-11-23T17:07:45,696 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=e8613a6d83b2cc2b96bf3bd7651c01ee, UNASSIGN 2024-11-23T17:07:45,697 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=226bdb0a3fd7994f18ba669ae3f7e93a, regionState=CLOSING, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:07:45,697 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=e8613a6d83b2cc2b96bf3bd7651c01ee, regionState=CLOSING, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:07:45,700 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, 
region=226bdb0a3fd7994f18ba669ae3f7e93a, UNASSIGN because future has completed 2024-11-23T17:07:45,700 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:07:45,701 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure 226bdb0a3fd7994f18ba669ae3f7e93a, server=1a7c5470b472,37875,1732381534675}] 2024-11-23T17:07:45,701 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=e8613a6d83b2cc2b96bf3bd7651c01ee, UNASSIGN because future has completed 2024-11-23T17:07:45,702 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:07:45,702 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=36, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure e8613a6d83b2cc2b96bf3bd7651c01ee, server=1a7c5470b472,37875,1732381534675}] 2024-11-23T17:07:45,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-11-23T17:07:45,854 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(122): Close 226bdb0a3fd7994f18ba669ae3f7e93a 2024-11-23T17:07:45,854 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:07:45,854 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1722): Closing 226bdb0a3fd7994f18ba669ae3f7e93a, disabling compactions & flushes 2024-11-23T17:07:45,855 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,5,1732381553983.226bdb0a3fd7994f18ba669ae3f7e93a. 2024-11-23T17:07:45,855 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,5,1732381553983.226bdb0a3fd7994f18ba669ae3f7e93a. 2024-11-23T17:07:45,855 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,5,1732381553983.226bdb0a3fd7994f18ba669ae3f7e93a. after waiting 0 ms 2024-11-23T17:07:45,855 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,5,1732381553983.226bdb0a3fd7994f18ba669ae3f7e93a. 
2024-11-23T17:07:45,860 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/226bdb0a3fd7994f18ba669ae3f7e93a/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-11-23T17:07:45,860 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:07:45,860 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,5,1732381553983.226bdb0a3fd7994f18ba669ae3f7e93a. 2024-11-23T17:07:45,860 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1676): Region close journal for 226bdb0a3fd7994f18ba669ae3f7e93a: Waiting for close lock at 1732381665854Running coprocessor pre-close hooks at 1732381665854Disabling compacts and flushes for region at 1732381665854Disabling writes for close at 1732381665855 (+1 ms)Writing region close event to WAL at 1732381665856 (+1 ms)Running coprocessor post-close hooks at 1732381665860 (+4 ms)Closed at 1732381665860 2024-11-23T17:07:45,863 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(157): Closed 226bdb0a3fd7994f18ba669ae3f7e93a 2024-11-23T17:07:45,863 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(122): Close e8613a6d83b2cc2b96bf3bd7651c01ee 2024-11-23T17:07:45,863 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:07:45,863 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1722): Closing e8613a6d83b2cc2b96bf3bd7651c01ee, disabling compactions & flushes 2024-11-23T17:07:45,863 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1732381553983.e8613a6d83b2cc2b96bf3bd7651c01ee. 2024-11-23T17:07:45,863 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1732381553983.e8613a6d83b2cc2b96bf3bd7651c01ee. 2024-11-23T17:07:45,863 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1732381553983.e8613a6d83b2cc2b96bf3bd7651c01ee. after waiting 0 ms 2024-11-23T17:07:45,863 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1732381553983.e8613a6d83b2cc2b96bf3bd7651c01ee. 
2024-11-23T17:07:45,863 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=226bdb0a3fd7994f18ba669ae3f7e93a, regionState=CLOSED 2024-11-23T17:07:45,866 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=35, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure 226bdb0a3fd7994f18ba669ae3f7e93a, server=1a7c5470b472,37875,1732381534675 because future has completed 2024-11-23T17:07:45,868 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/e8613a6d83b2cc2b96bf3bd7651c01ee/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-11-23T17:07:45,869 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:07:45,869 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1732381553983.e8613a6d83b2cc2b96bf3bd7651c01ee. 2024-11-23T17:07:45,869 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=35, resume processing ppid=34 2024-11-23T17:07:45,869 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1676): Region close journal for e8613a6d83b2cc2b96bf3bd7651c01ee: Waiting for close lock at 1732381665863Running coprocessor pre-close hooks at 1732381665863Disabling compacts and flushes for region at 1732381665863Disabling writes for close at 1732381665863Writing region close event to WAL at 1732381665864 (+1 ms)Running coprocessor post-close hooks at 1732381665869 (+5 ms)Closed at 1732381665869 2024-11-23T17:07:45,869 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=35, ppid=34, state=SUCCESS, hasLock=false; CloseRegionProcedure 226bdb0a3fd7994f18ba669ae3f7e93a, server=1a7c5470b472,37875,1732381534675 in 166 msec 2024-11-23T17:07:45,871 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=34, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=226bdb0a3fd7994f18ba669ae3f7e93a, UNASSIGN in 175 msec 2024-11-23T17:07:45,871 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(157): Closed e8613a6d83b2cc2b96bf3bd7651c01ee 2024-11-23T17:07:45,872 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=e8613a6d83b2cc2b96bf3bd7651c01ee, regionState=CLOSED 2024-11-23T17:07:45,873 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=36, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure e8613a6d83b2cc2b96bf3bd7651c01ee, server=1a7c5470b472,37875,1732381534675 because future has completed 2024-11-23T17:07:45,877 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=36, resume processing ppid=33 2024-11-23T17:07:45,877 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=36, ppid=33, state=SUCCESS, hasLock=false; CloseRegionProcedure e8613a6d83b2cc2b96bf3bd7651c01ee, 
server=1a7c5470b472,37875,1732381534675 in 173 msec 2024-11-23T17:07:45,879 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=33, resume processing ppid=32 2024-11-23T17:07:45,879 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=33, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=e8613a6d83b2cc2b96bf3bd7651c01ee, UNASSIGN in 183 msec 2024-11-23T17:07:45,883 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=32, resume processing ppid=31 2024-11-23T17:07:45,883 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=32, ppid=31, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion in 192 msec 2024-11-23T17:07:45,884 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381665884"}]},"ts":"1732381665884"} 2024-11-23T17:07:45,886 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-11-23T17:07:45,886 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-11-23T17:07:45,888 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=31, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion in 210 msec 2024-11-23T17:07:45,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-11-23T17:07:45,998 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-11-23T17:07:46,002 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,008 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] access.PermissionStorage(261): Removing permissions of removed table testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,010 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=37, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,013 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37875 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,018 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-11-23T17:07:46,018 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-11-23T17:07:46,018 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-11-23T17:07:46,018 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-11-23T17:07:46,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:07:46,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:07:46,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:07:46,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper 
Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:07:46,021 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/5b0c07975ee7c61139f3fcd802f8c8d2 2024-11-23T17:07:46,021 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/e8613a6d83b2cc2b96bf3bd7651c01ee 2024-11-23T17:07:46,022 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/226bdb0a3fd7994f18ba669ae3f7e93a 2024-11-23T17:07:46,022 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:07:46,022 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:07:46,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-11-23T17:07:46,022 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:07:46,023 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:07:46,025 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/5b0c07975ee7c61139f3fcd802f8c8d2/cf, FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/5b0c07975ee7c61139f3fcd802f8c8d2/recovered.edits] 2024-11-23T17:07:46,025 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/e8613a6d83b2cc2b96bf3bd7651c01ee/cf, FileablePath, 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/e8613a6d83b2cc2b96bf3bd7651c01ee/recovered.edits] 2024-11-23T17:07:46,025 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/226bdb0a3fd7994f18ba669ae3f7e93a/cf, FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/226bdb0a3fd7994f18ba669ae3f7e93a/recovered.edits] 2024-11-23T17:07:46,032 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/5b0c07975ee7c61139f3fcd802f8c8d2/cf/22189fffabb8439a86a91175bf46eb55_SeqId_4_ to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testExportFileSystemStateWithSplitRegion/5b0c07975ee7c61139f3fcd802f8c8d2/cf/22189fffabb8439a86a91175bf46eb55_SeqId_4_ 2024-11-23T17:07:46,032 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/226bdb0a3fd7994f18ba669ae3f7e93a/cf/22189fffabb8439a86a91175bf46eb55_SeqId_4_.5b0c07975ee7c61139f3fcd802f8c8d2 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testExportFileSystemStateWithSplitRegion/226bdb0a3fd7994f18ba669ae3f7e93a/cf/22189fffabb8439a86a91175bf46eb55_SeqId_4_.5b0c07975ee7c61139f3fcd802f8c8d2 2024-11-23T17:07:46,032 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/e8613a6d83b2cc2b96bf3bd7651c01ee/cf/22189fffabb8439a86a91175bf46eb55_SeqId_4_.5b0c07975ee7c61139f3fcd802f8c8d2 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testExportFileSystemStateWithSplitRegion/e8613a6d83b2cc2b96bf3bd7651c01ee/cf/22189fffabb8439a86a91175bf46eb55_SeqId_4_.5b0c07975ee7c61139f3fcd802f8c8d2 2024-11-23T17:07:46,036 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/5b0c07975ee7c61139f3fcd802f8c8d2/recovered.edits/6.seqid to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testExportFileSystemStateWithSplitRegion/5b0c07975ee7c61139f3fcd802f8c8d2/recovered.edits/6.seqid 2024-11-23T17:07:46,037 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/226bdb0a3fd7994f18ba669ae3f7e93a/recovered.edits/10.seqid to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testExportFileSystemStateWithSplitRegion/226bdb0a3fd7994f18ba669ae3f7e93a/recovered.edits/10.seqid 2024-11-23T17:07:46,037 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from 
FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/e8613a6d83b2cc2b96bf3bd7651c01ee/recovered.edits/10.seqid to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testExportFileSystemStateWithSplitRegion/e8613a6d83b2cc2b96bf3bd7651c01ee/recovered.edits/10.seqid 2024-11-23T17:07:46,037 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/5b0c07975ee7c61139f3fcd802f8c8d2 2024-11-23T17:07:46,037 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/226bdb0a3fd7994f18ba669ae3f7e93a 2024-11-23T17:07:46,037 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportFileSystemStateWithSplitRegion/e8613a6d83b2cc2b96bf3bd7651c01ee 2024-11-23T17:07:46,038 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testExportFileSystemStateWithSplitRegion regions 2024-11-23T17:07:46,040 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=37, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,045 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37875 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-23T17:07:46,050 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 3 rows of testExportFileSystemStateWithSplitRegion from hbase:meta 2024-11-23T17:07:46,053 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testExportFileSystemStateWithSplitRegion' descriptor. 2024-11-23T17:07:46,055 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=37, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,055 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testExportFileSystemStateWithSplitRegion' from region states. 
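
The HFileArchiver entries above show the table's store files and recovered.edits being moved under the archive directory rather than deleted in place. A hedged sketch of how that archived layout could be inspected afterwards with the public Hadoop FileSystem API; the HDFS address and archive path are copied from the log, everything else is illustrative:

    // Sketch only: listing what HFileArchiver moved into the archive directory.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.LocatedFileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;

    public class ListArchivedFilesSketch {
      public static void main(String[] args) throws Exception {
        // NameNode address and archive root are taken verbatim from the log entries above.
        Path archive = new Path("hdfs://localhost:35431/user/jenkins/test-data/"
            + "7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/"
            + "testExportFileSystemStateWithSplitRegion");
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(archive.toUri(), conf)) {
          RemoteIterator<LocatedFileStatus> files = fs.listFiles(archive, true); // recursive listing
          while (files.hasNext()) {
            LocatedFileStatus f = files.next();
            System.out.println(f.getPath() + " (" + f.getLen() + " bytes)");
          }
        }
      }
    }
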
2024-11-23T17:07:46,055 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1732381546985.5b0c07975ee7c61139f3fcd802f8c8d2.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732381666055"}]},"ts":"9223372036854775807"} 2024-11-23T17:07:46,055 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1732381553983.e8613a6d83b2cc2b96bf3bd7651c01ee.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732381666055"}]},"ts":"9223372036854775807"} 2024-11-23T17:07:46,055 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,5,1732381553983.226bdb0a3fd7994f18ba669ae3f7e93a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732381666055"}]},"ts":"9223372036854775807"} 2024-11-23T17:07:46,058 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 3 regions from META 2024-11-23T17:07:46,058 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 5b0c07975ee7c61139f3fcd802f8c8d2, NAME => 'testExportFileSystemStateWithSplitRegion,,1732381546985.5b0c07975ee7c61139f3fcd802f8c8d2.', STARTKEY => '', ENDKEY => ''}, {ENCODED => e8613a6d83b2cc2b96bf3bd7651c01ee, NAME => 'testExportFileSystemStateWithSplitRegion,,1732381553983.e8613a6d83b2cc2b96bf3bd7651c01ee.', STARTKEY => '', ENDKEY => '5'}, {ENCODED => 226bdb0a3fd7994f18ba669ae3f7e93a, NAME => 'testExportFileSystemStateWithSplitRegion,5,1732381553983.226bdb0a3fd7994f18ba669ae3f7e93a.', STARTKEY => '5', ENDKEY => ''}] 2024-11-23T17:07:46,058 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testExportFileSystemStateWithSplitRegion' as deleted. 
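
The RegionStateStore deletes above remove the catalog rows for the pre-split parent region 5b0c07975ee7c61139f3fcd802f8c8d2 and its two daughters from hbase:meta. A small sketch of a client-side check that nothing is left behind, assuming an open Connection; it relies only on the fact, visible in the Delete rows above, that region rows are keyed by the table name followed by a comma:

    // Sketch only: scanning hbase:meta for leftover region rows of the dropped table.
    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.filter.PrefixFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class MetaLeftoverCheckSketch {
      // `conn` is assumed to be an open Connection, e.g. from ConnectionFactory.createConnection.
      public static void printLeftoverRows(Connection conn) throws IOException {
        byte[] prefix = Bytes.toBytes("testExportFileSystemStateWithSplitRegion,");
        Scan scan = new Scan();
        scan.withStartRow(prefix);                 // start at the first possible region row
        scan.setFilter(new PrefixFilter(prefix));  // stop matching once the prefix no longer holds
        try (Table meta = conn.getTable(TableName.META_TABLE_NAME);
             ResultScanner scanner = meta.getScanner(scan)) {
          for (Result row : scanner) {
            System.out.println("leftover meta row: " + Bytes.toStringBinary(row.getRow()));
          }
        }
      }
    }
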
2024-11-23T17:07:46,059 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732381666058"}]},"ts":"9223372036854775807"} 2024-11-23T17:07:46,060 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testExportFileSystemStateWithSplitRegion state from META 2024-11-23T17:07:46,061 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=37, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,063 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=37, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion in 58 msec 2024-11-23T17:07:46,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-11-23T17:07:46,129 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,130 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-11-23T17:07:46,131 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=38, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-11-23T17:07:46,135 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381666135"}]},"ts":"1732381666135"} 2024-11-23T17:07:46,137 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 2024-11-23T17:07:46,138 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-11-23T17:07:46,139 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion}] 2024-11-23T17:07:46,141 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=42e2b13e79d70de0ccaa034b156e7d6e, UNASSIGN}, {pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=7c56c74f89526c98c0086020fd89c555, UNASSIGN}] 2024-11-23T17:07:46,143 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=40, ppid=39, 
state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=42e2b13e79d70de0ccaa034b156e7d6e, UNASSIGN 2024-11-23T17:07:46,143 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=7c56c74f89526c98c0086020fd89c555, UNASSIGN 2024-11-23T17:07:46,144 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=7c56c74f89526c98c0086020fd89c555, regionState=CLOSING, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:07:46,144 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=42e2b13e79d70de0ccaa034b156e7d6e, regionState=CLOSING, regionLocation=1a7c5470b472,43453,1732381534825 2024-11-23T17:07:46,147 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=7c56c74f89526c98c0086020fd89c555, UNASSIGN because future has completed 2024-11-23T17:07:46,147 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:07:46,148 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7c56c74f89526c98c0086020fd89c555, server=1a7c5470b472,37875,1732381534675}] 2024-11-23T17:07:46,148 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=42e2b13e79d70de0ccaa034b156e7d6e, UNASSIGN because future has completed 2024-11-23T17:07:46,149 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:07:46,149 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=43, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure 42e2b13e79d70de0ccaa034b156e7d6e, server=1a7c5470b472,43453,1732381534825}] 2024-11-23T17:07:46,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-11-23T17:07:46,301 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(122): Close 7c56c74f89526c98c0086020fd89c555 2024-11-23T17:07:46,302 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:07:46,302 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1722): Closing 7c56c74f89526c98c0086020fd89c555, disabling compactions & flushes 2024-11-23T17:07:46,302 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1755): Closing region 
testtb-testExportFileSystemStateWithSplitRegion,1,1732381543797.7c56c74f89526c98c0086020fd89c555. 2024-11-23T17:07:46,302 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1732381543797.7c56c74f89526c98c0086020fd89c555. 2024-11-23T17:07:46,302 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1732381543797.7c56c74f89526c98c0086020fd89c555. after waiting 0 ms 2024-11-23T17:07:46,302 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1732381543797.7c56c74f89526c98c0086020fd89c555. 2024-11-23T17:07:46,304 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37663, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T17:07:46,305 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(122): Close 42e2b13e79d70de0ccaa034b156e7d6e 2024-11-23T17:07:46,305 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:07:46,305 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1722): Closing 42e2b13e79d70de0ccaa034b156e7d6e, disabling compactions & flushes 2024-11-23T17:07:46,305 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1732381543797.42e2b13e79d70de0ccaa034b156e7d6e. 2024-11-23T17:07:46,305 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1732381543797.42e2b13e79d70de0ccaa034b156e7d6e. 2024-11-23T17:07:46,305 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1732381543797.42e2b13e79d70de0ccaa034b156e7d6e. after waiting 0 ms 2024-11-23T17:07:46,305 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1732381543797.42e2b13e79d70de0ccaa034b156e7d6e. 
2024-11-23T17:07:46,308 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/7c56c74f89526c98c0086020fd89c555/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:07:46,309 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:07:46,309 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1732381543797.7c56c74f89526c98c0086020fd89c555. 2024-11-23T17:07:46,309 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1676): Region close journal for 7c56c74f89526c98c0086020fd89c555: Waiting for close lock at 1732381666302Running coprocessor pre-close hooks at 1732381666302Disabling compacts and flushes for region at 1732381666302Disabling writes for close at 1732381666302Writing region close event to WAL at 1732381666303 (+1 ms)Running coprocessor post-close hooks at 1732381666309 (+6 ms)Closed at 1732381666309 2024-11-23T17:07:46,310 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/42e2b13e79d70de0ccaa034b156e7d6e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:07:46,311 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:07:46,311 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1732381543797.42e2b13e79d70de0ccaa034b156e7d6e. 
2024-11-23T17:07:46,311 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1676): Region close journal for 42e2b13e79d70de0ccaa034b156e7d6e: Waiting for close lock at 1732381666305Running coprocessor pre-close hooks at 1732381666305Disabling compacts and flushes for region at 1732381666305Disabling writes for close at 1732381666305Writing region close event to WAL at 1732381666306 (+1 ms)Running coprocessor post-close hooks at 1732381666311 (+5 ms)Closed at 1732381666311 2024-11-23T17:07:46,312 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(157): Closed 7c56c74f89526c98c0086020fd89c555 2024-11-23T17:07:46,312 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=7c56c74f89526c98c0086020fd89c555, regionState=CLOSED 2024-11-23T17:07:46,313 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(157): Closed 42e2b13e79d70de0ccaa034b156e7d6e 2024-11-23T17:07:46,314 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=42e2b13e79d70de0ccaa034b156e7d6e, regionState=CLOSED 2024-11-23T17:07:46,316 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=42, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7c56c74f89526c98c0086020fd89c555, server=1a7c5470b472,37875,1732381534675 because future has completed 2024-11-23T17:07:46,317 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=43, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure 42e2b13e79d70de0ccaa034b156e7d6e, server=1a7c5470b472,43453,1732381534825 because future has completed 2024-11-23T17:07:46,319 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=42, resume processing ppid=41 2024-11-23T17:07:46,319 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=42, ppid=41, state=SUCCESS, hasLock=false; CloseRegionProcedure 7c56c74f89526c98c0086020fd89c555, server=1a7c5470b472,37875,1732381534675 in 169 msec 2024-11-23T17:07:46,320 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=43, resume processing ppid=40 2024-11-23T17:07:46,320 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=43, ppid=40, state=SUCCESS, hasLock=false; CloseRegionProcedure 42e2b13e79d70de0ccaa034b156e7d6e, server=1a7c5470b472,43453,1732381534825 in 169 msec 2024-11-23T17:07:46,321 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=41, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=7c56c74f89526c98c0086020fd89c555, UNASSIGN in 178 msec 2024-11-23T17:07:46,322 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=40, resume processing ppid=39 2024-11-23T17:07:46,322 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=40, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=42e2b13e79d70de0ccaa034b156e7d6e, UNASSIGN in 179 msec 2024-11-23T17:07:46,325 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=39, resume processing ppid=38 2024-11-23T17:07:46,325 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=39, ppid=38, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 184 msec 2024-11-23T17:07:46,326 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381666326"}]},"ts":"1732381666326"} 2024-11-23T17:07:46,328 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-11-23T17:07:46,328 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-11-23T17:07:46,330 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=38, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 197 msec 2024-11-23T17:07:46,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-11-23T17:07:46,449 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-11-23T17:07:46,452 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,461 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,462 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=44, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,465 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37875 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,468 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/42e2b13e79d70de0ccaa034b156e7d6e 2024-11-23T17:07:46,468 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/7c56c74f89526c98c0086020fd89c555 2024-11-23T17:07:46,471 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/42e2b13e79d70de0ccaa034b156e7d6e/cf, FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/42e2b13e79d70de0ccaa034b156e7d6e/recovered.edits] 2024-11-23T17:07:46,471 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/7c56c74f89526c98c0086020fd89c555/cf, FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/7c56c74f89526c98c0086020fd89c555/recovered.edits] 2024-11-23T17:07:46,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,472 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-11-23T17:07:46,472 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-11-23T17:07:46,472 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-11-23T17:07:46,473 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-11-23T17:07:46,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:07:46,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:07:46,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:07:46,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:07:46,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-23T17:07:46,478 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/7c56c74f89526c98c0086020fd89c555/cf/ee5c8540c4d14a6ba8f6a6339f60d0f8 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/7c56c74f89526c98c0086020fd89c555/cf/ee5c8540c4d14a6ba8f6a6339f60d0f8 2024-11-23T17:07:46,478 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/42e2b13e79d70de0ccaa034b156e7d6e/cf/84fa8d68826649708ae50439ceb84146 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/42e2b13e79d70de0ccaa034b156e7d6e/cf/84fa8d68826649708ae50439ceb84146 2024-11-23T17:07:46,483 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/42e2b13e79d70de0ccaa034b156e7d6e/recovered.edits/9.seqid to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/42e2b13e79d70de0ccaa034b156e7d6e/recovered.edits/9.seqid 2024-11-23T17:07:46,484 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/7c56c74f89526c98c0086020fd89c555/recovered.edits/9.seqid to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/7c56c74f89526c98c0086020fd89c555/recovered.edits/9.seqid 2024-11-23T17:07:46,484 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/42e2b13e79d70de0ccaa034b156e7d6e 2024-11-23T17:07:46,485 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSplitRegion/7c56c74f89526c98c0086020fd89c555 2024-11-23T17:07:46,485 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSplitRegion regions 2024-11-23T17:07:46,485 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c 2024-11-23T17:07:46,487 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf] 2024-11-23T17:07:46,492 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b202411238e7008dd9bb54cec88ca24554a3f0710_7c56c74f89526c98c0086020fd89c555 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b202411238e7008dd9bb54cec88ca24554a3f0710_7c56c74f89526c98c0086020fd89c555 2024-11-23T17:07:46,494 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e20241123a9e4e5323c844acd8706007974c1a587_42e2b13e79d70de0ccaa034b156e7d6e to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e20241123a9e4e5323c844acd8706007974c1a587_42e2b13e79d70de0ccaa034b156e7d6e 2024-11-23T17:07:46,494 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c 2024-11-23T17:07:46,497 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=44, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 
2024-11-23T17:07:46,501 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSplitRegion from hbase:meta 2024-11-23T17:07:46,504 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSplitRegion' descriptor. 2024-11-23T17:07:46,505 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=44, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,505 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSplitRegion' from region states. 2024-11-23T17:07:46,506 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1732381543797.42e2b13e79d70de0ccaa034b156e7d6e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732381666505"}]},"ts":"9223372036854775807"} 2024-11-23T17:07:46,506 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1732381543797.7c56c74f89526c98c0086020fd89c555.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732381666505"}]},"ts":"9223372036854775807"} 2024-11-23T17:07:46,508 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-23T17:07:46,509 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 42e2b13e79d70de0ccaa034b156e7d6e, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1732381543797.42e2b13e79d70de0ccaa034b156e7d6e.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 7c56c74f89526c98c0086020fd89c555, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1732381543797.7c56c74f89526c98c0086020fd89c555.', STARTKEY => '1', ENDKEY => ''}] 2024-11-23T17:07:46,509 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSplitRegion' as deleted. 
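
With the second table's region rows gone from hbase:meta, the remaining cleanup is the table drop completing and the three test snapshots being deleted, which the log records next. A hedged sketch of the equivalent Admin calls; the table and snapshot names are copied from the log, the surrounding code is illustrative:

    // Sketch only: the DELETE-table and snapshot-deletion requests seen here and in the
    // entries that follow, written as plain Admin calls. `admin` is assumed to come from
    // an open Connection, as in the earlier sketch.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public final class CleanupSketch {
      public static void dropTableAndSnapshots(Admin admin) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
        if (admin.tableExists(table)) {
          if (admin.isTableEnabled(table)) {
            admin.disableTable(table);  // corresponds to DisableTableProcedure pid=38 above
          }
          admin.deleteTable(table);     // corresponds to DeleteTableProcedure pid=44 above
        }
        // Snapshot names match the "delete name: ..." requests logged right after the table drop.
        admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithSplitRegion");
        admin.deleteSnapshot("snapshot-testExportFileSystemStateWithSplitRegion");
        admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithSplitRegion");
      }
    }
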
2024-11-23T17:07:46,509 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732381666509"}]},"ts":"9223372036854775807"} 2024-11-23T17:07:46,513 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSplitRegion state from META 2024-11-23T17:07:46,514 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=44, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,517 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=44, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 62 msec 2024-11-23T17:07:46,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-23T17:07:46,578 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,579 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-11-23T17:07:46,597 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-11-23T17:07:46,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,602 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-11-23T17:07:46,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,606 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-11-23T17:07:46,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:46,637 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=758 (was 717) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1293158403_22 at /127.0.0.1:35156 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #0 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:42747 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 28837) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/1a7c5470b472:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42747 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-1409 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_729965701_1 at /127.0.0.1:37890 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Container metrics unregistration java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: region-location-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1293158403_22 at /127.0.0.1:54504 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/1a7c5470b472:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1293158403_22 at /127.0.0.1:37908 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=796 (was 784) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=367 (was 233) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=2119 (was 7445) 2024-11-23T17:07:46,637 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=758 is superior to 500 2024-11-23T17:07:46,653 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithTargetName Thread=758, OpenFileDescriptor=796, MaxFileDescriptor=1048576, SystemLoadAverage=367, ProcessCount=17, AvailableMemoryMB=2118 2024-11-23T17:07:46,653 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=758 is superior to 500 2024-11-23T17:07:46,655 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T17:07:46,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName 2024-11-23T17:07:46,658 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T17:07:46,658 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithTargetName" procId is: 45 2024-11-23T17:07:46,659 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T17:07:46,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 
{}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-11-23T17:07:46,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741911_1087 (size=442) 2024-11-23T17:07:46,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741911_1087 (size=442) 2024-11-23T17:07:46,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741911_1087 (size=442) 2024-11-23T17:07:46,670 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 86de3ec4f1038bcfe993e8596174e0af, NAME => 'testtb-testExportWithTargetName,,1732381666655.86de3ec4f1038bcfe993e8596174e0af.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:07:46,671 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1d11954f6f2f4212f289500eac9f57fa, NAME => 'testtb-testExportWithTargetName,1,1732381666655.1d11954f6f2f4212f289500eac9f57fa.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:07:46,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741912_1088 (size=67) 2024-11-23T17:07:46,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741912_1088 (size=67) 2024-11-23T17:07:46,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741912_1088 (size=67) 2024-11-23T17:07:46,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741913_1089 (size=67) 2024-11-23T17:07:46,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741913_1089 (size=67) 2024-11-23T17:07:46,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741913_1089 (size=67) 2024-11-23T17:07:46,682 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] 
regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1732381666655.86de3ec4f1038bcfe993e8596174e0af.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:07:46,682 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1722): Closing 86de3ec4f1038bcfe993e8596174e0af, disabling compactions & flushes 2024-11-23T17:07:46,682 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1732381666655.86de3ec4f1038bcfe993e8596174e0af. 2024-11-23T17:07:46,682 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1732381666655.86de3ec4f1038bcfe993e8596174e0af. 2024-11-23T17:07:46,682 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1732381666655.86de3ec4f1038bcfe993e8596174e0af. after waiting 0 ms 2024-11-23T17:07:46,682 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1732381666655.86de3ec4f1038bcfe993e8596174e0af. 2024-11-23T17:07:46,682 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1732381666655.86de3ec4f1038bcfe993e8596174e0af. 2024-11-23T17:07:46,682 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1676): Region close journal for 86de3ec4f1038bcfe993e8596174e0af: Waiting for close lock at 1732381666682Disabling compacts and flushes for region at 1732381666682Disabling writes for close at 1732381666682Writing region close event to WAL at 1732381666682Closed at 1732381666682 2024-11-23T17:07:46,685 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1732381666655.1d11954f6f2f4212f289500eac9f57fa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:07:46,685 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1722): Closing 1d11954f6f2f4212f289500eac9f57fa, disabling compactions & flushes 2024-11-23T17:07:46,685 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1732381666655.1d11954f6f2f4212f289500eac9f57fa. 2024-11-23T17:07:46,685 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1732381666655.1d11954f6f2f4212f289500eac9f57fa. 2024-11-23T17:07:46,685 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1732381666655.1d11954f6f2f4212f289500eac9f57fa. after waiting 0 ms 2024-11-23T17:07:46,685 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1732381666655.1d11954f6f2f4212f289500eac9f57fa. 
2024-11-23T17:07:46,685 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1732381666655.1d11954f6f2f4212f289500eac9f57fa. 2024-11-23T17:07:46,685 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1676): Region close journal for 1d11954f6f2f4212f289500eac9f57fa: Waiting for close lock at 1732381666685Disabling compacts and flushes for region at 1732381666685Disabling writes for close at 1732381666685Writing region close event to WAL at 1732381666685Closed at 1732381666685 2024-11-23T17:07:46,687 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T17:07:46,687 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1732381666655.86de3ec4f1038bcfe993e8596174e0af.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1732381666687"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732381666687"}]},"ts":"1732381666687"} 2024-11-23T17:07:46,687 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1732381666655.1d11954f6f2f4212f289500eac9f57fa.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1732381666687"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732381666687"}]},"ts":"1732381666687"} 2024-11-23T17:07:46,690 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-23T17:07:46,691 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T17:07:46,691 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381666691"}]},"ts":"1732381666691"} 2024-11-23T17:07:46,692 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-11-23T17:07:46,693 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {1a7c5470b472=0} racks are {/default-rack=0} 2024-11-23T17:07:46,694 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T17:07:46,694 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T17:07:46,694 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T17:07:46,694 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T17:07:46,694 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T17:07:46,694 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T17:07:46,694 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T17:07:46,694 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T17:07:46,694 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T17:07:46,694 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of 
racks=1 2024-11-23T17:07:46,694 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=86de3ec4f1038bcfe993e8596174e0af, ASSIGN}, {pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=1d11954f6f2f4212f289500eac9f57fa, ASSIGN}] 2024-11-23T17:07:46,695 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=1d11954f6f2f4212f289500eac9f57fa, ASSIGN 2024-11-23T17:07:46,695 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=86de3ec4f1038bcfe993e8596174e0af, ASSIGN 2024-11-23T17:07:46,696 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=1d11954f6f2f4212f289500eac9f57fa, ASSIGN; state=OFFLINE, location=1a7c5470b472,43453,1732381534825; forceNewPlan=false, retain=false 2024-11-23T17:07:46,696 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=86de3ec4f1038bcfe993e8596174e0af, ASSIGN; state=OFFLINE, location=1a7c5470b472,37875,1732381534675; forceNewPlan=false, retain=false 2024-11-23T17:07:46,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-11-23T17:07:46,847 INFO [1a7c5470b472:46159 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-23T17:07:46,847 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=1d11954f6f2f4212f289500eac9f57fa, regionState=OPENING, regionLocation=1a7c5470b472,43453,1732381534825 2024-11-23T17:07:46,847 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=86de3ec4f1038bcfe993e8596174e0af, regionState=OPENING, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:07:46,849 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=86de3ec4f1038bcfe993e8596174e0af, ASSIGN because future has completed 2024-11-23T17:07:46,850 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=48, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure 86de3ec4f1038bcfe993e8596174e0af, server=1a7c5470b472,37875,1732381534675}] 2024-11-23T17:07:46,850 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=1d11954f6f2f4212f289500eac9f57fa, ASSIGN because future has completed 2024-11-23T17:07:46,851 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=49, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1d11954f6f2f4212f289500eac9f57fa, server=1a7c5470b472,43453,1732381534825}] 2024-11-23T17:07:46,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-11-23T17:07:47,005 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,,1732381666655.86de3ec4f1038bcfe993e8596174e0af. 2024-11-23T17:07:47,005 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,1,1732381666655.1d11954f6f2f4212f289500eac9f57fa. 2024-11-23T17:07:47,005 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7752): Opening region: {ENCODED => 86de3ec4f1038bcfe993e8596174e0af, NAME => 'testtb-testExportWithTargetName,,1732381666655.86de3ec4f1038bcfe993e8596174e0af.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T17:07:47,005 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7752): Opening region: {ENCODED => 1d11954f6f2f4212f289500eac9f57fa, NAME => 'testtb-testExportWithTargetName,1,1732381666655.1d11954f6f2f4212f289500eac9f57fa.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T17:07:47,006 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1732381666655.1d11954f6f2f4212f289500eac9f57fa. service=AccessControlService 2024-11-23T17:07:47,006 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,,1732381666655.86de3ec4f1038bcfe993e8596174e0af. 
service=AccessControlService 2024-11-23T17:07:47,006 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:07:47,006 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:07:47,006 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 86de3ec4f1038bcfe993e8596174e0af 2024-11-23T17:07:47,006 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 1d11954f6f2f4212f289500eac9f57fa 2024-11-23T17:07:47,006 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1732381666655.86de3ec4f1038bcfe993e8596174e0af.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:07:47,006 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1732381666655.1d11954f6f2f4212f289500eac9f57fa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:07:47,006 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7794): checking encryption for 86de3ec4f1038bcfe993e8596174e0af 2024-11-23T17:07:47,006 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7794): checking encryption for 1d11954f6f2f4212f289500eac9f57fa 2024-11-23T17:07:47,006 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7797): checking classloading for 86de3ec4f1038bcfe993e8596174e0af 2024-11-23T17:07:47,006 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7797): checking classloading for 1d11954f6f2f4212f289500eac9f57fa 2024-11-23T17:07:47,008 INFO [StoreOpener-86de3ec4f1038bcfe993e8596174e0af-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 86de3ec4f1038bcfe993e8596174e0af 2024-11-23T17:07:47,008 INFO [StoreOpener-1d11954f6f2f4212f289500eac9f57fa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1d11954f6f2f4212f289500eac9f57fa 2024-11-23T17:07:47,010 INFO [StoreOpener-86de3ec4f1038bcfe993e8596174e0af-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 86de3ec4f1038bcfe993e8596174e0af columnFamilyName cf 2024-11-23T17:07:47,010 INFO [StoreOpener-1d11954f6f2f4212f289500eac9f57fa-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1d11954f6f2f4212f289500eac9f57fa columnFamilyName cf 2024-11-23T17:07:47,012 DEBUG [StoreOpener-1d11954f6f2f4212f289500eac9f57fa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:07:47,012 DEBUG [StoreOpener-86de3ec4f1038bcfe993e8596174e0af-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:07:47,013 INFO [StoreOpener-1d11954f6f2f4212f289500eac9f57fa-1 {}] regionserver.HStore(327): Store=1d11954f6f2f4212f289500eac9f57fa/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:07:47,013 INFO [StoreOpener-86de3ec4f1038bcfe993e8596174e0af-1 {}] regionserver.HStore(327): Store=86de3ec4f1038bcfe993e8596174e0af/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:07:47,013 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1038): replaying wal for 1d11954f6f2f4212f289500eac9f57fa 2024-11-23T17:07:47,014 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1038): replaying wal for 86de3ec4f1038bcfe993e8596174e0af 2024-11-23T17:07:47,014 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/1d11954f6f2f4212f289500eac9f57fa 2024-11-23T17:07:47,015 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/1d11954f6f2f4212f289500eac9f57fa 2024-11-23T17:07:47,015 DEBUG 
[RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/86de3ec4f1038bcfe993e8596174e0af 2024-11-23T17:07:47,015 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/86de3ec4f1038bcfe993e8596174e0af 2024-11-23T17:07:47,015 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1048): stopping wal replay for 1d11954f6f2f4212f289500eac9f57fa 2024-11-23T17:07:47,015 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1060): Cleaning up temporary data for 1d11954f6f2f4212f289500eac9f57fa 2024-11-23T17:07:47,015 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1048): stopping wal replay for 86de3ec4f1038bcfe993e8596174e0af 2024-11-23T17:07:47,016 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1060): Cleaning up temporary data for 86de3ec4f1038bcfe993e8596174e0af 2024-11-23T17:07:47,017 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1093): writing seq id for 1d11954f6f2f4212f289500eac9f57fa 2024-11-23T17:07:47,017 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1093): writing seq id for 86de3ec4f1038bcfe993e8596174e0af 2024-11-23T17:07:47,019 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/1d11954f6f2f4212f289500eac9f57fa/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:07:47,020 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/86de3ec4f1038bcfe993e8596174e0af/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:07:47,021 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1114): Opened 1d11954f6f2f4212f289500eac9f57fa; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70660911, jitterRate=0.052929624915122986}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:07:47,021 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1114): Opened 86de3ec4f1038bcfe993e8596174e0af; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66542022, jitterRate=-0.008446604013442993}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:07:47,021 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, 
pid=49}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1d11954f6f2f4212f289500eac9f57fa 2024-11-23T17:07:47,021 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 86de3ec4f1038bcfe993e8596174e0af 2024-11-23T17:07:47,022 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1006): Region open journal for 1d11954f6f2f4212f289500eac9f57fa: Running coprocessor pre-open hook at 1732381667007Writing region info on filesystem at 1732381667007Initializing all the Stores at 1732381667008 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381667008Cleaning up temporary data from old regions at 1732381667015 (+7 ms)Running coprocessor post-open hooks at 1732381667021 (+6 ms)Region opened successfully at 1732381667021 2024-11-23T17:07:47,022 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1006): Region open journal for 86de3ec4f1038bcfe993e8596174e0af: Running coprocessor pre-open hook at 1732381667007Writing region info on filesystem at 1732381667007Initializing all the Stores at 1732381667008 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381667008Cleaning up temporary data from old regions at 1732381667016 (+8 ms)Running coprocessor post-open hooks at 1732381667021 (+5 ms)Region opened successfully at 1732381667022 (+1 ms) 2024-11-23T17:07:47,023 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,1,1732381666655.1d11954f6f2f4212f289500eac9f57fa., pid=49, masterSystemTime=1732381667002 2024-11-23T17:07:47,023 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,,1732381666655.86de3ec4f1038bcfe993e8596174e0af., pid=48, masterSystemTime=1732381667001 2024-11-23T17:07:47,028 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,1,1732381666655.1d11954f6f2f4212f289500eac9f57fa. 2024-11-23T17:07:47,028 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,1,1732381666655.1d11954f6f2f4212f289500eac9f57fa. 
2024-11-23T17:07:47,029 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=1d11954f6f2f4212f289500eac9f57fa, regionState=OPEN, openSeqNum=2, regionLocation=1a7c5470b472,43453,1732381534825 2024-11-23T17:07:47,029 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,,1732381666655.86de3ec4f1038bcfe993e8596174e0af. 2024-11-23T17:07:47,029 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,,1732381666655.86de3ec4f1038bcfe993e8596174e0af. 2024-11-23T17:07:47,030 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=86de3ec4f1038bcfe993e8596174e0af, regionState=OPEN, openSeqNum=2, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:07:47,032 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=49, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1d11954f6f2f4212f289500eac9f57fa, server=1a7c5470b472,43453,1732381534825 because future has completed 2024-11-23T17:07:47,034 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=48, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure 86de3ec4f1038bcfe993e8596174e0af, server=1a7c5470b472,37875,1732381534675 because future has completed 2024-11-23T17:07:47,039 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=49, resume processing ppid=47 2024-11-23T17:07:47,039 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=49, ppid=47, state=SUCCESS, hasLock=false; OpenRegionProcedure 1d11954f6f2f4212f289500eac9f57fa, server=1a7c5470b472,43453,1732381534825 in 182 msec 2024-11-23T17:07:47,040 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=48, resume processing ppid=46 2024-11-23T17:07:47,040 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=48, ppid=46, state=SUCCESS, hasLock=false; OpenRegionProcedure 86de3ec4f1038bcfe993e8596174e0af, server=1a7c5470b472,37875,1732381534675 in 185 msec 2024-11-23T17:07:47,041 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=47, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=1d11954f6f2f4212f289500eac9f57fa, ASSIGN in 345 msec 2024-11-23T17:07:47,043 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=46, resume processing ppid=45 2024-11-23T17:07:47,043 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=46, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=86de3ec4f1038bcfe993e8596174e0af, ASSIGN in 346 msec 2024-11-23T17:07:47,046 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T17:07:47,047 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381667046"}]},"ts":"1732381667046"} 2024-11-23T17:07:47,049 INFO [PEWorker-3 
{}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta 2024-11-23T17:07:47,051 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T17:07:47,051 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA 2024-11-23T17:07:47,055 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37875 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-23T17:07:47,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:07:47,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:07:47,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:07:47,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:07:47,062 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-23T17:07:47,063 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-23T17:07:47,063 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-23T17:07:47,064 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-23T17:07:47,068 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=45, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName in 408 msec 2024-11-23T17:07:47,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-11-23T17:07:47,288 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-23T17:07:47,288 DEBUG [Time-limited test {}] 
hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-23T17:07:47,291 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-11-23T17:07:47,291 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1732381666655.86de3ec4f1038bcfe993e8596174e0af. 2024-11-23T17:07:47,291 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:07:47,293 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-23T17:07:47,299 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-23T17:07:47,304 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40698, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:07:47,305 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-23T17:07:47,307 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-23T17:07:47,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732381667307 (current time:1732381667307). 
2024-11-23T17:07:47,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:07:47,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-11-23T17:07:47,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:07:47,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68e03df5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:07:47,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:07:47,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:07:47,309 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:07:47,310 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:07:47,310 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:07:47,310 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6603832, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:07:47,310 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:07:47,310 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:07:47,310 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:07:47,311 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51844, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:07:47,312 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b0abfc3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:07:47,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:07:47,313 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:07:47,313 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:07:47,314 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56922, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:07:47,315 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159. 2024-11-23T17:07:47,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:07:47,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:07:47,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:07:47,315 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T17:07:47,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7afcc1c7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:07:47,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:07:47,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:07:47,317 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:07:47,317 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:07:47,317 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:07:47,317 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1694a5bb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:07:47,317 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:07:47,317 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:07:47,318 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:07:47,318 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51854, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:07:47,319 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@406d4391, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:07:47,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:07:47,320 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:07:47,320 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:07:47,321 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56936, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-23T17:07:47,323 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce., hostname=1a7c5470b472,37875,1732381534675, seqNum=2] 2024-11-23T17:07:47,324 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159. 2024-11-23T17:07:47,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:07:47,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:07:47,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:07:47,324 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:07:47,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-23T17:07:47,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T17:07:47,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-23T17:07:47,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-11-23T17:07:47,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-11-23T17:07:47,327 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:07:47,329 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:07:47,331 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:07:47,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741914_1090 (size=167) 2024-11-23T17:07:47,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741914_1090 (size=167) 2024-11-23T17:07:47,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741914_1090 (size=167) 2024-11-23T17:07:47,339 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:07:47,339 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 86de3ec4f1038bcfe993e8596174e0af}, {pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1d11954f6f2f4212f289500eac9f57fa}] 2024-11-23T17:07:47,340 
INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 86de3ec4f1038bcfe993e8596174e0af 2024-11-23T17:07:47,340 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1d11954f6f2f4212f289500eac9f57fa 2024-11-23T17:07:47,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-11-23T17:07:47,492 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43453 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=52 2024-11-23T17:07:47,492 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37875 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=51 2024-11-23T17:07:47,493 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1732381666655.86de3ec4f1038bcfe993e8596174e0af. 2024-11-23T17:07:47,493 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1732381666655.1d11954f6f2f4212f289500eac9f57fa. 2024-11-23T17:07:47,493 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.HRegion(2603): Flush status journal for 86de3ec4f1038bcfe993e8596174e0af: 2024-11-23T17:07:47,493 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1732381666655.86de3ec4f1038bcfe993e8596174e0af. for emptySnaptb0-testExportWithTargetName completed. 2024-11-23T17:07:47,493 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.HRegion(2603): Flush status journal for 1d11954f6f2f4212f289500eac9f57fa: 2024-11-23T17:07:47,493 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1732381666655.1d11954f6f2f4212f289500eac9f57fa. for emptySnaptb0-testExportWithTargetName completed. 2024-11-23T17:07:47,493 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1732381666655.1d11954f6f2f4212f289500eac9f57fa.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-11-23T17:07:47,493 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1732381666655.86de3ec4f1038bcfe993e8596174e0af.' 
region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-11-23T17:07:47,493 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:07:47,493 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:07:47,493 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:07:47,493 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:07:47,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741916_1092 (size=70) 2024-11-23T17:07:47,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741916_1092 (size=70) 2024-11-23T17:07:47,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741916_1092 (size=70) 2024-11-23T17:07:47,506 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1732381666655.86de3ec4f1038bcfe993e8596174e0af. 2024-11-23T17:07:47,506 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=51 2024-11-23T17:07:47,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=51 2024-11-23T17:07:47,507 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 86de3ec4f1038bcfe993e8596174e0af 2024-11-23T17:07:47,507 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 86de3ec4f1038bcfe993e8596174e0af 2024-11-23T17:07:47,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741915_1091 (size=70) 2024-11-23T17:07:47,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741915_1091 (size=70) 2024-11-23T17:07:47,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741915_1091 (size=70) 2024-11-23T17:07:47,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1732381666655.1d11954f6f2f4212f289500eac9f57fa. 
2024-11-23T17:07:47,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-11-23T17:07:47,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=52 2024-11-23T17:07:47,510 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 1d11954f6f2f4212f289500eac9f57fa 2024-11-23T17:07:47,511 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1d11954f6f2f4212f289500eac9f57fa 2024-11-23T17:07:47,511 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=51, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 86de3ec4f1038bcfe993e8596174e0af in 170 msec 2024-11-23T17:07:47,518 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=52, resume processing ppid=50 2024-11-23T17:07:47,519 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:07:47,519 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=52, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 1d11954f6f2f4212f289500eac9f57fa in 173 msec 2024-11-23T17:07:47,520 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:07:47,521 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-23T17:07:47,521 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-23T17:07:47,521 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:07:47,522 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-23T17:07:47,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741917_1093 (size=62) 2024-11-23T17:07:47,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741917_1093 (size=62) 2024-11-23T17:07:47,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741917_1093 (size=62) 2024-11-23T17:07:47,541 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:07:47,541 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-11-23T17:07:47,542 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-11-23T17:07:47,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741918_1094 (size=649) 2024-11-23T17:07:47,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741918_1094 (size=649) 2024-11-23T17:07:47,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741918_1094 (size=649) 2024-11-23T17:07:47,559 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:07:47,564 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:07:47,564 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-11-23T17:07:47,566 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, 
state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:07:47,566 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-11-23T17:07:47,570 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=50, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 241 msec 2024-11-23T17:07:47,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-11-23T17:07:47,648 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-11-23T17:07:47,655 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37875 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,,1732381666655.86de3ec4f1038bcfe993e8596174e0af. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:07:47,658 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43453 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,1,1732381666655.1d11954f6f2f4212f289500eac9f57fa. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:07:47,660 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-23T17:07:47,663 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-11-23T17:07:47,663 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1732381666655.86de3ec4f1038bcfe993e8596174e0af. 
2024-11-23T17:07:47,663 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:07:47,665 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-23T17:07:47,671 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-23T17:07:47,677 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-23T17:07:47,680 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-23T17:07:47,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732381667680 (current time:1732381667680). 2024-11-23T17:07:47,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:07:47,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-11-23T17:07:47,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:07:47,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3401c2c4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:07:47,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:07:47,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:07:47,681 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:07:47,682 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:07:47,682 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:07:47,682 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26d9df07, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind 
address=null 2024-11-23T17:07:47,682 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:07:47,682 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:07:47,682 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:07:47,683 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51864, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:07:47,683 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47826f7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:07:47,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:07:47,684 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:07:47,685 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:07:47,686 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56944, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:07:47,687 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159. 
2024-11-23T17:07:47,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:07:47,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:07:47,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:07:47,687 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:07:47,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3473e822, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:07:47,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:07:47,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:07:47,688 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:07:47,688 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:07:47,688 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:07:47,689 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a2f6ddc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:07:47,689 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:07:47,689 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:07:47,689 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:07:47,690 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51894, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:07:47,690 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7641bb97, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:07:47,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:07:47,691 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:07:47,692 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:07:47,692 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56956, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:07:47,694 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce., hostname=1a7c5470b472,37875,1732381534675, seqNum=2] 2024-11-23T17:07:47,695 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159. 
2024-11-23T17:07:47,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:07:47,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:07:47,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:07:47,695 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:07:47,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-23T17:07:47,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-23T17:07:47,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-23T17:07:47,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-11-23T17:07:47,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-11-23T17:07:47,698 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:07:47,699 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:07:47,702 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:07:47,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741919_1095 (size=162) 2024-11-23T17:07:47,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741919_1095 (size=162) 2024-11-23T17:07:47,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741919_1095 (size=162) 2024-11-23T17:07:47,711 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:07:47,711 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 86de3ec4f1038bcfe993e8596174e0af}, {pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1d11954f6f2f4212f289500eac9f57fa}] 2024-11-23T17:07:47,713 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 86de3ec4f1038bcfe993e8596174e0af 2024-11-23T17:07:47,713 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1d11954f6f2f4212f289500eac9f57fa 2024-11-23T17:07:47,808 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-11-23T17:07:47,865 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43453 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=55 2024-11-23T17:07:47,865 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37875 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=54 2024-11-23T17:07:47,865 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1732381666655.86de3ec4f1038bcfe993e8596174e0af. 2024-11-23T17:07:47,865 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1732381666655.1d11954f6f2f4212f289500eac9f57fa. 2024-11-23T17:07:47,865 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2902): Flushing 86de3ec4f1038bcfe993e8596174e0af 1/1 column families, dataSize=132 B heapSize=544 B 2024-11-23T17:07:47,865 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2902): Flushing 1d11954f6f2f4212f289500eac9f57fa 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-11-23T17:07:47,885 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411239c7f04e58f7b4e169e16d7619e3e3a1d_86de3ec4f1038bcfe993e8596174e0af is 71, key is 0404052eef7729af6497f9d4f75e5603/cf:q/1732381667655/Put/seqid=0 2024-11-23T17:07:47,887 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411231b38aa4f24b547b5ae8882a39463d362_1d11954f6f2f4212f289500eac9f57fa is 71, key is 171183089f0a91090e0b5b41ff72e7da/cf:q/1732381667658/Put/seqid=0 2024-11-23T17:07:47,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741920_1096 (size=5032) 2024-11-23T17:07:47,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741920_1096 (size=5032) 2024-11-23T17:07:47,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741920_1096 (size=5032) 2024-11-23T17:07:47,895 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:07:47,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741921_1097 
(size=8241) 2024-11-23T17:07:47,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741921_1097 (size=8241) 2024-11-23T17:07:47,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741921_1097 (size=8241) 2024-11-23T17:07:47,900 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:07:47,902 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411239c7f04e58f7b4e169e16d7619e3e3a1d_86de3ec4f1038bcfe993e8596174e0af to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202411239c7f04e58f7b4e169e16d7619e3e3a1d_86de3ec4f1038bcfe993e8596174e0af 2024-11-23T17:07:47,903 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/86de3ec4f1038bcfe993e8596174e0af/.tmp/cf/bb7052aae88b4b108045669431c0a573, store: [table=testtb-testExportWithTargetName family=cf region=86de3ec4f1038bcfe993e8596174e0af] 2024-11-23T17:07:47,904 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/86de3ec4f1038bcfe993e8596174e0af/.tmp/cf/bb7052aae88b4b108045669431c0a573 is 208, key is 09d82c66667eebe54b49cd2f07c878b00/cf:q/1732381667655/Put/seqid=0 2024-11-23T17:07:47,905 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411231b38aa4f24b547b5ae8882a39463d362_1d11954f6f2f4212f289500eac9f57fa to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b202411231b38aa4f24b547b5ae8882a39463d362_1d11954f6f2f4212f289500eac9f57fa 2024-11-23T17:07:47,906 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/1d11954f6f2f4212f289500eac9f57fa/.tmp/cf/44b54fe95b2a40dc9dc9bb1c0053c3c7, store: [table=testtb-testExportWithTargetName family=cf region=1d11954f6f2f4212f289500eac9f57fa] 2024-11-23T17:07:47,907 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/1d11954f6f2f4212f289500eac9f57fa/.tmp/cf/44b54fe95b2a40dc9dc9bb1c0053c3c7 is 208, key is 1f34a281130ffc50d789267e12c525cec/cf:q/1732381667658/Put/seqid=0 2024-11-23T17:07:47,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741922_1098 (size=5706) 2024-11-23T17:07:47,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741922_1098 (size=5706) 2024-11-23T17:07:47,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741922_1098 (size=5706) 2024-11-23T17:07:47,932 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=132, hasBloomFilter=true, into tmp file hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/86de3ec4f1038bcfe993e8596174e0af/.tmp/cf/bb7052aae88b4b108045669431c0a573 2024-11-23T17:07:47,939 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/86de3ec4f1038bcfe993e8596174e0af/.tmp/cf/bb7052aae88b4b108045669431c0a573 as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/86de3ec4f1038bcfe993e8596174e0af/cf/bb7052aae88b4b108045669431c0a573 2024-11-23T17:07:47,945 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/86de3ec4f1038bcfe993e8596174e0af/cf/bb7052aae88b4b108045669431c0a573, entries=2, sequenceid=6, filesize=5.6 K 2024-11-23T17:07:47,946 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 86de3ec4f1038bcfe993e8596174e0af in 81ms, sequenceid=6, compaction requested=false 2024-11-23T17:07:47,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-11-23T17:07:47,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2603): Flush status journal for 86de3ec4f1038bcfe993e8596174e0af: 2024-11-23T17:07:47,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1732381666655.86de3ec4f1038bcfe993e8596174e0af. for snaptb0-testExportWithTargetName completed. 
2024-11-23T17:07:47,947 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1732381666655.86de3ec4f1038bcfe993e8596174e0af.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-11-23T17:07:47,947 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:07:47,947 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/86de3ec4f1038bcfe993e8596174e0af/cf/bb7052aae88b4b108045669431c0a573] hfiles 2024-11-23T17:07:47,947 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/86de3ec4f1038bcfe993e8596174e0af/cf/bb7052aae88b4b108045669431c0a573 for snapshot=snaptb0-testExportWithTargetName 2024-11-23T17:07:47,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741923_1099 (size=15153) 2024-11-23T17:07:47,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741923_1099 (size=15153) 2024-11-23T17:07:47,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741923_1099 (size=15153) 2024-11-23T17:07:47,956 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/1d11954f6f2f4212f289500eac9f57fa/.tmp/cf/44b54fe95b2a40dc9dc9bb1c0053c3c7 2024-11-23T17:07:47,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741924_1100 (size=109) 2024-11-23T17:07:47,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741924_1100 (size=109) 2024-11-23T17:07:47,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741924_1100 (size=109) 2024-11-23T17:07:47,964 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1732381666655.86de3ec4f1038bcfe993e8596174e0af. 
2024-11-23T17:07:47,964 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-11-23T17:07:47,964 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/1d11954f6f2f4212f289500eac9f57fa/.tmp/cf/44b54fe95b2a40dc9dc9bb1c0053c3c7 as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/1d11954f6f2f4212f289500eac9f57fa/cf/44b54fe95b2a40dc9dc9bb1c0053c3c7 2024-11-23T17:07:47,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=54 2024-11-23T17:07:47,964 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 86de3ec4f1038bcfe993e8596174e0af 2024-11-23T17:07:47,965 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 86de3ec4f1038bcfe993e8596174e0af 2024-11-23T17:07:47,967 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=54, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 86de3ec4f1038bcfe993e8596174e0af in 255 msec 2024-11-23T17:07:47,971 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/1d11954f6f2f4212f289500eac9f57fa/cf/44b54fe95b2a40dc9dc9bb1c0053c3c7, entries=48, sequenceid=6, filesize=14.8 K 2024-11-23T17:07:47,972 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 1d11954f6f2f4212f289500eac9f57fa in 106ms, sequenceid=6, compaction requested=false 2024-11-23T17:07:47,972 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2603): Flush status journal for 1d11954f6f2f4212f289500eac9f57fa: 2024-11-23T17:07:47,972 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1732381666655.1d11954f6f2f4212f289500eac9f57fa. for snaptb0-testExportWithTargetName completed. 2024-11-23T17:07:47,972 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1732381666655.1d11954f6f2f4212f289500eac9f57fa.' 
region-info for snapshot=snaptb0-testExportWithTargetName 2024-11-23T17:07:47,972 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:07:47,972 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/1d11954f6f2f4212f289500eac9f57fa/cf/44b54fe95b2a40dc9dc9bb1c0053c3c7] hfiles 2024-11-23T17:07:47,972 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/1d11954f6f2f4212f289500eac9f57fa/cf/44b54fe95b2a40dc9dc9bb1c0053c3c7 for snapshot=snaptb0-testExportWithTargetName 2024-11-23T17:07:47,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741925_1101 (size=109) 2024-11-23T17:07:47,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741925_1101 (size=109) 2024-11-23T17:07:47,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741925_1101 (size=109) 2024-11-23T17:07:47,983 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1732381666655.1d11954f6f2f4212f289500eac9f57fa. 
2024-11-23T17:07:47,983 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=55 2024-11-23T17:07:47,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=55 2024-11-23T17:07:47,983 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 1d11954f6f2f4212f289500eac9f57fa 2024-11-23T17:07:47,984 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1d11954f6f2f4212f289500eac9f57fa 2024-11-23T17:07:47,986 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=55, resume processing ppid=53 2024-11-23T17:07:47,986 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=55, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 1d11954f6f2f4212f289500eac9f57fa in 274 msec 2024-11-23T17:07:47,986 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:07:47,987 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:07:47,988 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-23T17:07:47,989 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-23T17:07:47,989 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:07:47,990 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b202411231b38aa4f24b547b5ae8882a39463d362_1d11954f6f2f4212f289500eac9f57fa, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202411239c7f04e58f7b4e169e16d7619e3e3a1d_86de3ec4f1038bcfe993e8596174e0af] hfiles 2024-11-23T17:07:47,990 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b202411231b38aa4f24b547b5ae8882a39463d362_1d11954f6f2f4212f289500eac9f57fa 2024-11-23T17:07:47,990 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202411239c7f04e58f7b4e169e16d7619e3e3a1d_86de3ec4f1038bcfe993e8596174e0af 2024-11-23T17:07:47,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741926_1102 (size=293) 2024-11-23T17:07:47,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741926_1102 (size=293) 2024-11-23T17:07:47,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741926_1102 (size=293) 2024-11-23T17:07:48,003 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:07:48,003 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-11-23T17:07:48,010 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-11-23T17:07:48,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-11-23T17:07:48,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741927_1103 (size=959) 2024-11-23T17:07:48,038 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741927_1103 (size=959) 2024-11-23T17:07:48,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741927_1103 (size=959) 2024-11-23T17:07:48,042 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:07:48,050 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:07:48,051 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-11-23T17:07:48,053 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:07:48,053 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-11-23T17:07:48,054 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=53, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 357 msec 2024-11-23T17:07:48,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-11-23T17:07:48,329 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-11-23T17:07:48,329 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381668329 2024-11-23T17:07:48,329 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:35431, tgtDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381668329, rawTgtDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381668329, srcFsUri=hdfs://localhost:35431, srcDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:07:48,375 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:35431, 
inputRoot=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:07:48,375 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1293158403_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381668329, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381668329/.hbase-snapshot/.tmp/testExportWithTargetName 2024-11-23T17:07:48,378 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-23T17:07:48,386 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381668329/.hbase-snapshot/.tmp/testExportWithTargetName 2024-11-23T17:07:48,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741928_1104 (size=162) 2024-11-23T17:07:48,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741928_1104 (size=162) 2024-11-23T17:07:48,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741928_1104 (size=162) 2024-11-23T17:07:48,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741929_1105 (size=959) 2024-11-23T17:07:48,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741929_1105 (size=959) 2024-11-23T17:07:48,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741929_1105 (size=959) 2024-11-23T17:07:48,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741930_1106 (size=154) 2024-11-23T17:07:48,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741930_1106 (size=154) 2024-11-23T17:07:48,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741930_1106 (size=154) 2024-11-23T17:07:48,436 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:07:48,436 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:07:48,437 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:07:49,498 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop-12900760545021401079.jar 2024-11-23T17:07:49,498 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:07:49,499 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:07:49,595 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop-13264227522568385988.jar 2024-11-23T17:07:49,596 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:07:49,597 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:07:49,597 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:07:49,598 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:07:49,598 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:07:49,598 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:07:49,599 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-23T17:07:49,599 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-23T17:07:49,599 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-23T17:07:49,600 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-23T17:07:49,600 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-23T17:07:49,600 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-23T17:07:49,601 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-23T17:07:49,601 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-23T17:07:49,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-23T17:07:49,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-23T17:07:49,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-23T17:07:49,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:07:49,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:07:49,604 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:07:49,604 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:07:49,604 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:07:49,605 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:07:49,605 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:07:49,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741931_1107 (size=24020) 2024-11-23T17:07:49,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741931_1107 (size=24020) 2024-11-23T17:07:49,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741931_1107 (size=24020) 2024-11-23T17:07:49,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741932_1108 (size=77755) 2024-11-23T17:07:49,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741932_1108 (size=77755) 2024-11-23T17:07:49,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741932_1108 (size=77755) 2024-11-23T17:07:49,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741933_1109 (size=131360) 2024-11-23T17:07:49,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741933_1109 (size=131360) 2024-11-23T17:07:49,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741933_1109 (size=131360) 2024-11-23T17:07:49,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741934_1110 (size=111793) 2024-11-23T17:07:49,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741934_1110 (size=111793) 2024-11-23T17:07:49,761 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741934_1110 (size=111793) 2024-11-23T17:07:49,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741935_1111 (size=1832290) 2024-11-23T17:07:49,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741935_1111 (size=1832290) 2024-11-23T17:07:49,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741935_1111 (size=1832290) 2024-11-23T17:07:49,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741936_1112 (size=440956) 2024-11-23T17:07:49,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741936_1112 (size=440956) 2024-11-23T17:07:49,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741936_1112 (size=440956) 2024-11-23T17:07:49,828 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0001_000001 (auth:SIMPLE) from 127.0.0.1:50798 2024-11-23T17:07:49,835 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_2/usercache/jenkins/appcache/application_1732381541272_0001/container_1732381541272_0001_01_000001/launch_container.sh] 2024-11-23T17:07:49,835 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_2/usercache/jenkins/appcache/application_1732381541272_0001/container_1732381541272_0001_01_000001/container_tokens] 2024-11-23T17:07:49,835 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_2/usercache/jenkins/appcache/application_1732381541272_0001/container_1732381541272_0001_01_000001/sysfs] 2024-11-23T17:07:49,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741937_1113 (size=8360005) 2024-11-23T17:07:49,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741937_1113 (size=8360005) 2024-11-23T17:07:49,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741937_1113 (size=8360005) 2024-11-23T17:07:49,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741938_1114 (size=503880) 2024-11-23T17:07:49,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741938_1114 (size=503880) 
2024-11-23T17:07:49,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741938_1114 (size=503880) 2024-11-23T17:07:49,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741939_1115 (size=322274) 2024-11-23T17:07:49,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741939_1115 (size=322274) 2024-11-23T17:07:49,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741939_1115 (size=322274) 2024-11-23T17:07:49,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741940_1116 (size=20406) 2024-11-23T17:07:49,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741940_1116 (size=20406) 2024-11-23T17:07:49,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741940_1116 (size=20406) 2024-11-23T17:07:49,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741941_1117 (size=45609) 2024-11-23T17:07:49,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741941_1117 (size=45609) 2024-11-23T17:07:49,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741941_1117 (size=45609) 2024-11-23T17:07:49,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741942_1118 (size=136454) 2024-11-23T17:07:49,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741942_1118 (size=136454) 2024-11-23T17:07:49,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741942_1118 (size=136454) 2024-11-23T17:07:49,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741943_1119 (size=1597270) 2024-11-23T17:07:49,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741943_1119 (size=1597270) 2024-11-23T17:07:49,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741943_1119 (size=1597270) 2024-11-23T17:07:49,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741944_1120 (size=30873) 2024-11-23T17:07:49,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741944_1120 (size=30873) 2024-11-23T17:07:49,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741944_1120 (size=30873) 2024-11-23T17:07:49,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741945_1121 (size=29229) 
2024-11-23T17:07:49,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741945_1121 (size=29229) 2024-11-23T17:07:49,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741945_1121 (size=29229) 2024-11-23T17:07:49,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741946_1122 (size=903661) 2024-11-23T17:07:49,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741946_1122 (size=903661) 2024-11-23T17:07:49,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741946_1122 (size=903661) 2024-11-23T17:07:50,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741947_1123 (size=5175431) 2024-11-23T17:07:50,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741947_1123 (size=5175431) 2024-11-23T17:07:50,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741947_1123 (size=5175431) 2024-11-23T17:07:50,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741948_1124 (size=232881) 2024-11-23T17:07:50,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741948_1124 (size=232881) 2024-11-23T17:07:50,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741948_1124 (size=232881) 2024-11-23T17:07:50,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741949_1125 (size=1323991) 2024-11-23T17:07:50,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741949_1125 (size=1323991) 2024-11-23T17:07:50,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741949_1125 (size=1323991) 2024-11-23T17:07:50,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741950_1126 (size=4695811) 2024-11-23T17:07:50,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741950_1126 (size=4695811) 2024-11-23T17:07:50,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741950_1126 (size=4695811) 2024-11-23T17:07:50,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741951_1127 (size=1877034) 2024-11-23T17:07:50,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741951_1127 (size=1877034) 2024-11-23T17:07:50,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741951_1127 
(size=1877034) 2024-11-23T17:07:50,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741952_1128 (size=6424742) 2024-11-23T17:07:50,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741952_1128 (size=6424742) 2024-11-23T17:07:50,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741952_1128 (size=6424742) 2024-11-23T17:07:50,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741953_1129 (size=217555) 2024-11-23T17:07:50,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741953_1129 (size=217555) 2024-11-23T17:07:50,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741953_1129 (size=217555) 2024-11-23T17:07:50,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741954_1130 (size=4188619) 2024-11-23T17:07:50,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741954_1130 (size=4188619) 2024-11-23T17:07:50,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741954_1130 (size=4188619) 2024-11-23T17:07:50,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741955_1131 (size=127628) 2024-11-23T17:07:50,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741955_1131 (size=127628) 2024-11-23T17:07:50,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741955_1131 (size=127628) 2024-11-23T17:07:50,323 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-11-23T17:07:50,327 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-11-23T17:07:50,330 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.8 K 2024-11-23T17:07:50,330 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-11-23T17:07:50,330 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.6 K 2024-11-23T17:07:50,330 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=4.9 K 2024-11-23T17:07:50,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741956_1132 (size=1031) 2024-11-23T17:07:50,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741956_1132 (size=1031) 2024-11-23T17:07:50,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741956_1132 (size=1031) 2024-11-23T17:07:50,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741957_1133 (size=35) 2024-11-23T17:07:50,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741957_1133 (size=35) 2024-11-23T17:07:50,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741957_1133 (size=35) 2024-11-23T17:07:50,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741958_1134 (size=304084) 2024-11-23T17:07:50,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741958_1134 (size=304084) 2024-11-23T17:07:50,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741958_1134 (size=304084) 2024-11-23T17:07:50,440 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T17:07:50,440 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-23T17:07:50,656 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0002_000001 (auth:SIMPLE) from 127.0.0.1:57742 2024-11-23T17:07:51,393 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:07:54,340 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-11-23T17:07:54,340 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-11-23T17:07:54,341 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:54,341 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:07:55,992 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0002_000001 (auth:SIMPLE) from 127.0.0.1:37360 2024-11-23T17:07:56,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741959_1135 (size=349782) 2024-11-23T17:07:56,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741959_1135 (size=349782) 2024-11-23T17:07:56,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741959_1135 (size=349782) 2024-11-23T17:07:58,395 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0002_000001 (auth:SIMPLE) from 127.0.0.1:57750 2024-11-23T17:07:58,395 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0002_000001 (auth:SIMPLE) from 127.0.0.1:34148 2024-11-23T17:07:59,135 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0002_000001 (auth:SIMPLE) from 127.0.0.1:57760 2024-11-23T17:07:59,142 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0002_000001 (auth:SIMPLE) from 127.0.0.1:34158 2024-11-23T17:07:59,845 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:08:01,825 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732381541272_0002_01_000006 while processing FINISH_CONTAINERS event 2024-11-23T17:08:03,177 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-23T17:08:03,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741960_1136 (size=15153) 2024-11-23T17:08:03,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741960_1136 (size=15153) 2024-11-23T17:08:03,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741960_1136 (size=15153) 2024-11-23T17:08:04,266 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_1/usercache/jenkins/appcache/application_1732381541272_0002/container_1732381541272_0002_01_000002/launch_container.sh] 2024-11-23T17:08:04,266 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_1/usercache/jenkins/appcache/application_1732381541272_0002/container_1732381541272_0002_01_000002/container_tokens] 2024-11-23T17:08:04,266 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_1/usercache/jenkins/appcache/application_1732381541272_0002/container_1732381541272_0002_01_000002/sysfs] 2024-11-23T17:08:05,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741962_1138 (size=5706) 2024-11-23T17:08:05,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741962_1138 (size=5706) 2024-11-23T17:08:05,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741962_1138 (size=5706) 2024-11-23T17:08:05,484 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_3/usercache/jenkins/appcache/application_1732381541272_0002/container_1732381541272_0002_01_000004/launch_container.sh] 2024-11-23T17:08:05,484 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_3/usercache/jenkins/appcache/application_1732381541272_0002/container_1732381541272_0002_01_000004/container_tokens] 2024-11-23T17:08:05,484 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_3/usercache/jenkins/appcache/application_1732381541272_0002/container_1732381541272_0002_01_000004/sysfs] 2024-11-23T17:08:06,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741963_1139 (size=5032) 2024-11-23T17:08:06,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741963_1139 (size=5032) 2024-11-23T17:08:06,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741963_1139 (size=5032) 2024-11-23T17:08:06,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741964_1140 (size=8241) 2024-11-23T17:08:06,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741964_1140 (size=8241) 2024-11-23T17:08:06,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741964_1140 (size=8241) 2024-11-23T17:08:06,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741961_1137 (size=31749) 2024-11-23T17:08:06,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741961_1137 (size=31749) 2024-11-23T17:08:06,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741961_1137 (size=31749) 2024-11-23T17:08:06,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741965_1141 (size=465) 2024-11-23T17:08:06,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741965_1141 (size=465) 2024-11-23T17:08:06,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741965_1141 (size=465) 2024-11-23T17:08:06,309 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_0/usercache/jenkins/appcache/application_1732381541272_0002/container_1732381541272_0002_01_000005/launch_container.sh] 2024-11-23T17:08:06,309 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_0/usercache/jenkins/appcache/application_1732381541272_0002/container_1732381541272_0002_01_000005/container_tokens] 2024-11-23T17:08:06,309 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_0/usercache/jenkins/appcache/application_1732381541272_0002/container_1732381541272_0002_01_000005/sysfs] 2024-11-23T17:08:06,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741966_1142 (size=31749) 2024-11-23T17:08:06,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741966_1142 (size=31749) 2024-11-23T17:08:06,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741966_1142 (size=31749) 2024-11-23T17:08:06,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741967_1143 (size=349782) 2024-11-23T17:08:06,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741967_1143 (size=349782) 2024-11-23T17:08:06,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741967_1143 (size=349782) 2024-11-23T17:08:06,350 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_0/usercache/jenkins/appcache/application_1732381541272_0002/container_1732381541272_0002_01_000003/launch_container.sh] 2024-11-23T17:08:06,350 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_0/usercache/jenkins/appcache/application_1732381541272_0002/container_1732381541272_0002_01_000003/container_tokens] 2024-11-23T17:08:06,350 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_0/usercache/jenkins/appcache/application_1732381541272_0002/container_1732381541272_0002_01_000003/sysfs] 2024-11-23T17:08:06,355 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0002_000001 (auth:SIMPLE) from 127.0.0.1:38960 2024-11-23T17:08:06,364 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0002_000001 (auth:SIMPLE) from 127.0.0.1:38972 2024-11-23T17:08:07,611 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-23T17:08:07,612 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
2024-11-23T17:08:07,619 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: testExportWithTargetName 2024-11-23T17:08:07,619 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-23T17:08:07,620 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-23T17:08:07,620 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1293158403_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-11-23T17:08:07,621 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo 2024-11-23T17:08:07,621 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest 2024-11-23T17:08:07,621 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1293158403_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381668329/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381668329/.hbase-snapshot/testExportWithTargetName 2024-11-23T17:08:07,621 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381668329/.hbase-snapshot/testExportWithTargetName/.snapshotinfo 2024-11-23T17:08:07,621 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381668329/.hbase-snapshot/testExportWithTargetName/data.manifest 2024-11-23T17:08:07,628 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName 2024-11-23T17:08:07,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=56, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName 2024-11-23T17:08:07,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-11-23T17:08:07,632 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381687631"}]},"ts":"1732381687631"} 2024-11-23T17:08:07,634 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta 2024-11-23T17:08:07,634 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithTargetName to state=DISABLING 2024-11-23T17:08:07,635 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=57, ppid=56, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-11-23T17:08:07,636 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=86de3ec4f1038bcfe993e8596174e0af, UNASSIGN}, {pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=1d11954f6f2f4212f289500eac9f57fa, UNASSIGN}] 2024-11-23T17:08:07,637 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=1d11954f6f2f4212f289500eac9f57fa, UNASSIGN 2024-11-23T17:08:07,637 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=86de3ec4f1038bcfe993e8596174e0af, UNASSIGN 2024-11-23T17:08:07,638 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=1d11954f6f2f4212f289500eac9f57fa, regionState=CLOSING, regionLocation=1a7c5470b472,43453,1732381534825 2024-11-23T17:08:07,638 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=86de3ec4f1038bcfe993e8596174e0af, regionState=CLOSING, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:08:07,640 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=1d11954f6f2f4212f289500eac9f57fa, UNASSIGN because future has completed 2024-11-23T17:08:07,640 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:08:07,640 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1d11954f6f2f4212f289500eac9f57fa, server=1a7c5470b472,43453,1732381534825}] 2024-11-23T17:08:07,640 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=86de3ec4f1038bcfe993e8596174e0af, UNASSIGN because future has completed 2024-11-23T17:08:07,641 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:08:07,641 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=61, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure 86de3ec4f1038bcfe993e8596174e0af, server=1a7c5470b472,37875,1732381534675}] 2024-11-23T17:08:07,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-11-23T17:08:07,792 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] 
handler.UnassignRegionHandler(122): Close 1d11954f6f2f4212f289500eac9f57fa 2024-11-23T17:08:07,793 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:08:07,793 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1722): Closing 1d11954f6f2f4212f289500eac9f57fa, disabling compactions & flushes 2024-11-23T17:08:07,793 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1732381666655.1d11954f6f2f4212f289500eac9f57fa. 2024-11-23T17:08:07,793 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1732381666655.1d11954f6f2f4212f289500eac9f57fa. 2024-11-23T17:08:07,793 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1732381666655.1d11954f6f2f4212f289500eac9f57fa. after waiting 0 ms 2024-11-23T17:08:07,793 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1732381666655.1d11954f6f2f4212f289500eac9f57fa. 2024-11-23T17:08:07,793 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(122): Close 86de3ec4f1038bcfe993e8596174e0af 2024-11-23T17:08:07,793 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:08:07,794 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1722): Closing 86de3ec4f1038bcfe993e8596174e0af, disabling compactions & flushes 2024-11-23T17:08:07,794 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1732381666655.86de3ec4f1038bcfe993e8596174e0af. 2024-11-23T17:08:07,794 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1732381666655.86de3ec4f1038bcfe993e8596174e0af. 2024-11-23T17:08:07,794 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1732381666655.86de3ec4f1038bcfe993e8596174e0af. after waiting 0 ms 2024-11-23T17:08:07,794 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1732381666655.86de3ec4f1038bcfe993e8596174e0af. 
2024-11-23T17:08:07,798 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/86de3ec4f1038bcfe993e8596174e0af/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:08:07,798 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/1d11954f6f2f4212f289500eac9f57fa/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:08:07,799 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:08:07,799 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:08:07,799 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1732381666655.1d11954f6f2f4212f289500eac9f57fa. 2024-11-23T17:08:07,799 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1732381666655.86de3ec4f1038bcfe993e8596174e0af. 2024-11-23T17:08:07,799 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1676): Region close journal for 1d11954f6f2f4212f289500eac9f57fa: Waiting for close lock at 1732381687793Running coprocessor pre-close hooks at 1732381687793Disabling compacts and flushes for region at 1732381687793Disabling writes for close at 1732381687793Writing region close event to WAL at 1732381687794 (+1 ms)Running coprocessor post-close hooks at 1732381687799 (+5 ms)Closed at 1732381687799 2024-11-23T17:08:07,799 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1676): Region close journal for 86de3ec4f1038bcfe993e8596174e0af: Waiting for close lock at 1732381687793Running coprocessor pre-close hooks at 1732381687793Disabling compacts and flushes for region at 1732381687793Disabling writes for close at 1732381687794 (+1 ms)Writing region close event to WAL at 1732381687794Running coprocessor post-close hooks at 1732381687798 (+4 ms)Closed at 1732381687799 (+1 ms) 2024-11-23T17:08:07,800 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(157): Closed 86de3ec4f1038bcfe993e8596174e0af 2024-11-23T17:08:07,801 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=86de3ec4f1038bcfe993e8596174e0af, regionState=CLOSED 2024-11-23T17:08:07,801 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(157): Closed 1d11954f6f2f4212f289500eac9f57fa 2024-11-23T17:08:07,802 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=1d11954f6f2f4212f289500eac9f57fa, regionState=CLOSED 2024-11-23T17:08:07,803 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=61, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure 86de3ec4f1038bcfe993e8596174e0af, server=1a7c5470b472,37875,1732381534675 because future has completed 2024-11-23T17:08:07,804 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=60, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1d11954f6f2f4212f289500eac9f57fa, server=1a7c5470b472,43453,1732381534825 because future has completed 2024-11-23T17:08:07,806 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=61, resume processing ppid=58 2024-11-23T17:08:07,807 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=60, resume processing ppid=59 2024-11-23T17:08:07,807 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=60, ppid=59, state=SUCCESS, hasLock=false; CloseRegionProcedure 1d11954f6f2f4212f289500eac9f57fa, server=1a7c5470b472,43453,1732381534825 in 164 msec 2024-11-23T17:08:07,807 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=61, ppid=58, state=SUCCESS, hasLock=false; CloseRegionProcedure 86de3ec4f1038bcfe993e8596174e0af, server=1a7c5470b472,37875,1732381534675 in 163 msec 2024-11-23T17:08:07,808 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=58, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=86de3ec4f1038bcfe993e8596174e0af, UNASSIGN in 170 msec 2024-11-23T17:08:07,809 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=59, resume processing ppid=57 2024-11-23T17:08:07,809 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=59, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=1d11954f6f2f4212f289500eac9f57fa, UNASSIGN in 171 msec 2024-11-23T17:08:07,811 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=57, resume processing ppid=56 2024-11-23T17:08:07,811 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=57, ppid=56, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 174 msec 2024-11-23T17:08:07,813 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381687813"}]},"ts":"1732381687813"} 2024-11-23T17:08:07,815 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-11-23T17:08:07,815 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 2024-11-23T17:08:07,817 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=56, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName in 187 msec 2024-11-23T17:08:07,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-11-23T17:08:07,948 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-23T17:08:07,948 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] 
master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithTargetName 2024-11-23T17:08:07,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-23T17:08:07,950 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-23T17:08:07,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithTargetName 2024-11-23T17:08:07,951 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=62, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-23T17:08:07,955 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37875 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName 2024-11-23T17:08:07,957 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/86de3ec4f1038bcfe993e8596174e0af 2024-11-23T17:08:07,957 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/1d11954f6f2f4212f289500eac9f57fa 2024-11-23T17:08:07,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-23T17:08:07,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-23T17:08:07,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-23T17:08:07,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-23T17:08:07,960 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-23T17:08:07,960 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/1d11954f6f2f4212f289500eac9f57fa/cf, FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/1d11954f6f2f4212f289500eac9f57fa/recovered.edits] 2024-11-23T17:08:07,960 DEBUG [HFileArchiver-6 {}] 
backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/86de3ec4f1038bcfe993e8596174e0af/cf, FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/86de3ec4f1038bcfe993e8596174e0af/recovered.edits] 2024-11-23T17:08:07,960 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-23T17:08:07,960 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-23T17:08:07,960 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-23T17:08:07,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-23T17:08:07,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-23T17:08:07,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:08:07,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:08:07,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-23T17:08:07,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-23T17:08:07,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:08:07,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:08:07,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=62 2024-11-23T17:08:07,966 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/1d11954f6f2f4212f289500eac9f57fa/cf/44b54fe95b2a40dc9dc9bb1c0053c3c7 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportWithTargetName/1d11954f6f2f4212f289500eac9f57fa/cf/44b54fe95b2a40dc9dc9bb1c0053c3c7 2024-11-23T17:08:07,966 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/86de3ec4f1038bcfe993e8596174e0af/cf/bb7052aae88b4b108045669431c0a573 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportWithTargetName/86de3ec4f1038bcfe993e8596174e0af/cf/bb7052aae88b4b108045669431c0a573 2024-11-23T17:08:07,969 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/86de3ec4f1038bcfe993e8596174e0af/recovered.edits/9.seqid to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportWithTargetName/86de3ec4f1038bcfe993e8596174e0af/recovered.edits/9.seqid 2024-11-23T17:08:07,970 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/1d11954f6f2f4212f289500eac9f57fa/recovered.edits/9.seqid to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportWithTargetName/1d11954f6f2f4212f289500eac9f57fa/recovered.edits/9.seqid 2024-11-23T17:08:07,970 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/86de3ec4f1038bcfe993e8596174e0af 2024-11-23T17:08:07,970 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithTargetName/1d11954f6f2f4212f289500eac9f57fa 2024-11-23T17:08:07,970 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions 2024-11-23T17:08:07,971 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71 2024-11-23T17:08:07,972 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf] 2024-11-23T17:08:07,976 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b202411231b38aa4f24b547b5ae8882a39463d362_1d11954f6f2f4212f289500eac9f57fa to 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b202411231b38aa4f24b547b5ae8882a39463d362_1d11954f6f2f4212f289500eac9f57fa 2024-11-23T17:08:07,977 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202411239c7f04e58f7b4e169e16d7619e3e3a1d_86de3ec4f1038bcfe993e8596174e0af to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202411239c7f04e58f7b4e169e16d7619e3e3a1d_86de3ec4f1038bcfe993e8596174e0af 2024-11-23T17:08:07,978 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71 2024-11-23T17:08:07,980 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=62, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-23T17:08:07,983 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from hbase:meta 2024-11-23T17:08:07,986 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithTargetName' descriptor. 2024-11-23T17:08:07,987 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=62, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-23T17:08:07,987 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithTargetName' from region states. 2024-11-23T17:08:07,987 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1732381666655.86de3ec4f1038bcfe993e8596174e0af.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732381687987"}]},"ts":"9223372036854775807"} 2024-11-23T17:08:07,987 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1732381666655.1d11954f6f2f4212f289500eac9f57fa.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732381687987"}]},"ts":"9223372036854775807"} 2024-11-23T17:08:07,989 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-23T17:08:07,989 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 86de3ec4f1038bcfe993e8596174e0af, NAME => 'testtb-testExportWithTargetName,,1732381666655.86de3ec4f1038bcfe993e8596174e0af.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 1d11954f6f2f4212f289500eac9f57fa, NAME => 'testtb-testExportWithTargetName,1,1732381666655.1d11954f6f2f4212f289500eac9f57fa.', STARTKEY => '1', ENDKEY => ''}] 2024-11-23T17:08:07,989 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithTargetName' as deleted. 
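The entries above trace the master-side DisableTableProcedure (pid=56) and DeleteTableProcedure (pid=62) tearing down testtb-testExportWithTargetName and archiving its HFiles. As a hedged illustration only, and not taken from the test source, the sketch below shows the kind of client-side Admin calls that typically drive such a teardown, assuming a standard HBase 2.x/3.x client Connection; the table and snapshot names are copied from the log, the class name and everything else is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropExportedTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // picks up hbase-site.xml from the classpath
    TableName table = TableName.valueOf("testtb-testExportWithTargetName");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(table)) {
        admin.disableTable(table);  // master runs a DisableTableProcedure, unassigning both regions
        admin.deleteTable(table);   // master runs a DeleteTableProcedure, archiving HFiles and clearing meta
      }
      // the test also drops its snapshots once the table is gone
      admin.deleteSnapshot("emptySnaptb0-testExportWithTargetName");
      admin.deleteSnapshot("snaptb0-testExportWithTargetName");
    }
  }
}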
2024-11-23T17:08:07,989 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732381687989"}]},"ts":"9223372036854775807"} 2024-11-23T17:08:07,991 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithTargetName state from META 2024-11-23T17:08:07,992 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=62, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-23T17:08:07,993 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=62, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName in 44 msec 2024-11-23T17:08:08,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=62 2024-11-23T17:08:08,068 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithTargetName 2024-11-23T17:08:08,068 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-23T17:08:08,075 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithTargetName" type: DISABLED 2024-11-23T17:08:08,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithTargetName 2024-11-23T17:08:08,078 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithTargetName" type: DISABLED 2024-11-23T17:08:08,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithTargetName 2024-11-23T17:08:08,107 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithTargetName Thread=791 (was 758) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1293158403_22 at /127.0.0.1:60328 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:42449 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1293158403_22 at /127.0.0.1:45558 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:45871 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ApplicationMasterLauncher #1 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 32309) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45871 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2203 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45871 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44303 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:45871 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1898850586_1 at /127.0.0.1:45528 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42449 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1898850586_1 at /127.0.0.1:38936 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1293158403_22 at /127.0.0.1:38966 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=817 (was 796) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=501 (was 367) - SystemLoadAverage LEAK? -, ProcessCount=14 (was 17), AvailableMemoryMB=3849 (was 2118) - AvailableMemoryMB LEAK? 
- 2024-11-23T17:08:08,107 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=791 is superior to 500 2024-11-23T17:08:08,123 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithResetTtl Thread=791, OpenFileDescriptor=817, MaxFileDescriptor=1048576, SystemLoadAverage=501, ProcessCount=14, AvailableMemoryMB=3849 2024-11-23T17:08:08,123 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=791 is superior to 500 2024-11-23T17:08:08,125 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T17:08:08,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl 2024-11-23T17:08:08,127 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T17:08:08,127 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithResetTtl" procId is: 63 2024-11-23T17:08:08,128 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T17:08:08,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-23T17:08:08,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741968_1144 (size=440) 2024-11-23T17:08:08,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741968_1144 (size=440) 2024-11-23T17:08:08,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741968_1144 (size=440) 2024-11-23T17:08:08,138 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0547da56e62a809fa0c7179ccfb1e23a, NAME => 'testtb-testExportWithResetTtl,,1732381688125.0547da56e62a809fa0c7179ccfb1e23a.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:08:08,138 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => fccbf8c10e348af89c4f1d21eb43cdb2, NAME => 'testtb-testExportWithResetTtl,1,1732381688125.fccbf8c10e348af89c4f1d21eb43cdb2.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:08:08,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741969_1145 (size=65) 2024-11-23T17:08:08,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741969_1145 (size=65) 2024-11-23T17:08:08,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741969_1145 (size=65) 2024-11-23T17:08:08,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741970_1146 (size=65) 2024-11-23T17:08:08,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741970_1146 (size=65) 2024-11-23T17:08:08,153 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1732381688125.fccbf8c10e348af89c4f1d21eb43cdb2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:08:08,153 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing fccbf8c10e348af89c4f1d21eb43cdb2, disabling compactions & flushes 2024-11-23T17:08:08,153 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1732381688125.fccbf8c10e348af89c4f1d21eb43cdb2. 2024-11-23T17:08:08,153 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1732381688125.fccbf8c10e348af89c4f1d21eb43cdb2. 2024-11-23T17:08:08,153 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1732381688125.fccbf8c10e348af89c4f1d21eb43cdb2. after waiting 0 ms 2024-11-23T17:08:08,153 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1732381688125.fccbf8c10e348af89c4f1d21eb43cdb2. 
2024-11-23T17:08:08,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741970_1146 (size=65) 2024-11-23T17:08:08,153 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1732381688125.fccbf8c10e348af89c4f1d21eb43cdb2. 2024-11-23T17:08:08,153 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for fccbf8c10e348af89c4f1d21eb43cdb2: Waiting for close lock at 1732381688153Disabling compacts and flushes for region at 1732381688153Disabling writes for close at 1732381688153Writing region close event to WAL at 1732381688153Closed at 1732381688153 2024-11-23T17:08:08,154 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1732381688125.0547da56e62a809fa0c7179ccfb1e23a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:08:08,154 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing 0547da56e62a809fa0c7179ccfb1e23a, disabling compactions & flushes 2024-11-23T17:08:08,154 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1732381688125.0547da56e62a809fa0c7179ccfb1e23a. 2024-11-23T17:08:08,154 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1732381688125.0547da56e62a809fa0c7179ccfb1e23a. 2024-11-23T17:08:08,154 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1732381688125.0547da56e62a809fa0c7179ccfb1e23a. after waiting 0 ms 2024-11-23T17:08:08,154 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1732381688125.0547da56e62a809fa0c7179ccfb1e23a. 2024-11-23T17:08:08,154 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1732381688125.0547da56e62a809fa0c7179ccfb1e23a. 
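The create request above for testtb-testExportWithResetTtl asks for a MOB-enabled family (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1') and two regions split at '1'. As a hedged sketch only, assuming the standard HBase client descriptor builders rather than the test's own helper code, an equivalent table could be declared like this; the class and method names are illustrative.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateMobTableExample {
  // assumes an open Admin handle, e.g. obtained from Connection.getAdmin()
  static void createMobTable(Admin admin) throws IOException {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testtb-testExportWithResetTtl"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)   // IS_MOB => 'true'
            .setMobThreshold(0L)   // MOB_THRESHOLD => '0': every cell value is written to the MOB area
            .setMaxVersions(1)     // VERSIONS => '1'
            .build())
        .build();
    // a single split key '1' yields the two regions seen above: ('', '1') and ('1', '')
    admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
  }
}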
2024-11-23T17:08:08,154 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0547da56e62a809fa0c7179ccfb1e23a: Waiting for close lock at 1732381688154Disabling compacts and flushes for region at 1732381688154Disabling writes for close at 1732381688154Writing region close event to WAL at 1732381688154Closed at 1732381688154 2024-11-23T17:08:08,155 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T17:08:08,155 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1732381688125.fccbf8c10e348af89c4f1d21eb43cdb2.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732381688155"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732381688155"}]},"ts":"1732381688155"} 2024-11-23T17:08:08,156 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1732381688125.0547da56e62a809fa0c7179ccfb1e23a.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732381688155"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732381688155"}]},"ts":"1732381688155"} 2024-11-23T17:08:08,158 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-23T17:08:08,159 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T17:08:08,159 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381688159"}]},"ts":"1732381688159"} 2024-11-23T17:08:08,161 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-11-23T17:08:08,161 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {1a7c5470b472=0} racks are {/default-rack=0} 2024-11-23T17:08:08,162 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T17:08:08,162 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T17:08:08,162 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T17:08:08,162 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T17:08:08,162 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T17:08:08,162 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T17:08:08,162 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T17:08:08,162 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T17:08:08,162 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T17:08:08,162 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T17:08:08,163 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=0547da56e62a809fa0c7179ccfb1e23a, ASSIGN}, {pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=fccbf8c10e348af89c4f1d21eb43cdb2, ASSIGN}] 2024-11-23T17:08:08,164 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=fccbf8c10e348af89c4f1d21eb43cdb2, ASSIGN 2024-11-23T17:08:08,164 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=0547da56e62a809fa0c7179ccfb1e23a, ASSIGN 2024-11-23T17:08:08,164 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=fccbf8c10e348af89c4f1d21eb43cdb2, ASSIGN; state=OFFLINE, location=1a7c5470b472,44711,1732381534872; forceNewPlan=false, retain=false 2024-11-23T17:08:08,164 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=0547da56e62a809fa0c7179ccfb1e23a, ASSIGN; state=OFFLINE, location=1a7c5470b472,37875,1732381534675; forceNewPlan=false, retain=false 2024-11-23T17:08:08,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-23T17:08:08,315 INFO [1a7c5470b472:46159 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-23T17:08:08,316 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=fccbf8c10e348af89c4f1d21eb43cdb2, regionState=OPENING, regionLocation=1a7c5470b472,44711,1732381534872 2024-11-23T17:08:08,316 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=0547da56e62a809fa0c7179ccfb1e23a, regionState=OPENING, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:08:08,318 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=fccbf8c10e348af89c4f1d21eb43cdb2, ASSIGN because future has completed 2024-11-23T17:08:08,318 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure fccbf8c10e348af89c4f1d21eb43cdb2, server=1a7c5470b472,44711,1732381534872}] 2024-11-23T17:08:08,319 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=0547da56e62a809fa0c7179ccfb1e23a, ASSIGN because future has completed 2024-11-23T17:08:08,319 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=67, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0547da56e62a809fa0c7179ccfb1e23a, server=1a7c5470b472,37875,1732381534675}] 2024-11-23T17:08:08,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-23T17:08:08,470 DEBUG [RSProcedureDispatcher-pool-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T17:08:08,472 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35313, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T17:08:08,475 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,,1732381688125.0547da56e62a809fa0c7179ccfb1e23a. 2024-11-23T17:08:08,475 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7752): Opening region: {ENCODED => 0547da56e62a809fa0c7179ccfb1e23a, NAME => 'testtb-testExportWithResetTtl,,1732381688125.0547da56e62a809fa0c7179ccfb1e23a.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T17:08:08,476 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,1,1732381688125.fccbf8c10e348af89c4f1d21eb43cdb2. 
2024-11-23T17:08:08,476 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7752): Opening region: {ENCODED => fccbf8c10e348af89c4f1d21eb43cdb2, NAME => 'testtb-testExportWithResetTtl,1,1732381688125.fccbf8c10e348af89c4f1d21eb43cdb2.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T17:08:08,476 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1732381688125.0547da56e62a809fa0c7179ccfb1e23a. service=AccessControlService 2024-11-23T17:08:08,476 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1732381688125.fccbf8c10e348af89c4f1d21eb43cdb2. service=AccessControlService 2024-11-23T17:08:08,476 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:08:08,476 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:08:08,476 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl fccbf8c10e348af89c4f1d21eb43cdb2 2024-11-23T17:08:08,476 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 0547da56e62a809fa0c7179ccfb1e23a 2024-11-23T17:08:08,476 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1732381688125.fccbf8c10e348af89c4f1d21eb43cdb2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:08:08,476 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1732381688125.0547da56e62a809fa0c7179ccfb1e23a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:08:08,477 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7794): checking encryption for fccbf8c10e348af89c4f1d21eb43cdb2 2024-11-23T17:08:08,477 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7794): checking encryption for 0547da56e62a809fa0c7179ccfb1e23a 2024-11-23T17:08:08,477 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7797): checking classloading for fccbf8c10e348af89c4f1d21eb43cdb2 2024-11-23T17:08:08,477 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7797): checking classloading for 0547da56e62a809fa0c7179ccfb1e23a 2024-11-23T17:08:08,478 INFO [StoreOpener-fccbf8c10e348af89c4f1d21eb43cdb2-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region fccbf8c10e348af89c4f1d21eb43cdb2 2024-11-23T17:08:08,478 INFO [StoreOpener-0547da56e62a809fa0c7179ccfb1e23a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0547da56e62a809fa0c7179ccfb1e23a 2024-11-23T17:08:08,480 INFO [StoreOpener-fccbf8c10e348af89c4f1d21eb43cdb2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fccbf8c10e348af89c4f1d21eb43cdb2 columnFamilyName cf 2024-11-23T17:08:08,480 INFO [StoreOpener-0547da56e62a809fa0c7179ccfb1e23a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0547da56e62a809fa0c7179ccfb1e23a columnFamilyName cf 2024-11-23T17:08:08,481 DEBUG [StoreOpener-fccbf8c10e348af89c4f1d21eb43cdb2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:08:08,481 DEBUG [StoreOpener-0547da56e62a809fa0c7179ccfb1e23a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:08:08,481 INFO [StoreOpener-fccbf8c10e348af89c4f1d21eb43cdb2-1 {}] regionserver.HStore(327): Store=fccbf8c10e348af89c4f1d21eb43cdb2/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:08:08,481 INFO [StoreOpener-0547da56e62a809fa0c7179ccfb1e23a-1 {}] regionserver.HStore(327): Store=0547da56e62a809fa0c7179ccfb1e23a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:08:08,482 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1038): replaying wal for fccbf8c10e348af89c4f1d21eb43cdb2 2024-11-23T17:08:08,482 DEBUG 
[RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1038): replaying wal for 0547da56e62a809fa0c7179ccfb1e23a 2024-11-23T17:08:08,483 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/0547da56e62a809fa0c7179ccfb1e23a 2024-11-23T17:08:08,483 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/fccbf8c10e348af89c4f1d21eb43cdb2 2024-11-23T17:08:08,483 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/0547da56e62a809fa0c7179ccfb1e23a 2024-11-23T17:08:08,483 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/fccbf8c10e348af89c4f1d21eb43cdb2 2024-11-23T17:08:08,484 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1048): stopping wal replay for 0547da56e62a809fa0c7179ccfb1e23a 2024-11-23T17:08:08,484 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1048): stopping wal replay for fccbf8c10e348af89c4f1d21eb43cdb2 2024-11-23T17:08:08,484 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1060): Cleaning up temporary data for 0547da56e62a809fa0c7179ccfb1e23a 2024-11-23T17:08:08,484 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1060): Cleaning up temporary data for fccbf8c10e348af89c4f1d21eb43cdb2 2024-11-23T17:08:08,485 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1093): writing seq id for 0547da56e62a809fa0c7179ccfb1e23a 2024-11-23T17:08:08,485 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1093): writing seq id for fccbf8c10e348af89c4f1d21eb43cdb2 2024-11-23T17:08:08,487 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/0547da56e62a809fa0c7179ccfb1e23a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:08:08,487 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/fccbf8c10e348af89c4f1d21eb43cdb2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:08:08,488 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, 
pid=67}] regionserver.HRegion(1114): Opened 0547da56e62a809fa0c7179ccfb1e23a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66974824, jitterRate=-0.00199735164642334}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:08:08,488 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0547da56e62a809fa0c7179ccfb1e23a 2024-11-23T17:08:08,488 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1114): Opened fccbf8c10e348af89c4f1d21eb43cdb2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67796195, jitterRate=0.010242030024528503}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:08:08,488 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1122): Running coprocessor post-open hooks for fccbf8c10e348af89c4f1d21eb43cdb2 2024-11-23T17:08:08,489 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1006): Region open journal for 0547da56e62a809fa0c7179ccfb1e23a: Running coprocessor pre-open hook at 1732381688477Writing region info on filesystem at 1732381688477Initializing all the Stores at 1732381688478 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381688478Cleaning up temporary data from old regions at 1732381688484 (+6 ms)Running coprocessor post-open hooks at 1732381688488 (+4 ms)Region opened successfully at 1732381688489 (+1 ms) 2024-11-23T17:08:08,489 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1006): Region open journal for fccbf8c10e348af89c4f1d21eb43cdb2: Running coprocessor pre-open hook at 1732381688477Writing region info on filesystem at 1732381688477Initializing all the Stores at 1732381688478 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381688478Cleaning up temporary data from old regions at 1732381688484 (+6 ms)Running coprocessor post-open hooks at 1732381688488 (+4 ms)Region opened successfully at 1732381688489 (+1 ms) 2024-11-23T17:08:08,490 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,,1732381688125.0547da56e62a809fa0c7179ccfb1e23a., pid=67, masterSystemTime=1732381688471 2024-11-23T17:08:08,490 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2236): Post open deploy tasks for 
testtb-testExportWithResetTtl,1,1732381688125.fccbf8c10e348af89c4f1d21eb43cdb2., pid=66, masterSystemTime=1732381688470 2024-11-23T17:08:08,492 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,,1732381688125.0547da56e62a809fa0c7179ccfb1e23a. 2024-11-23T17:08:08,492 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,,1732381688125.0547da56e62a809fa0c7179ccfb1e23a. 2024-11-23T17:08:08,492 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=0547da56e62a809fa0c7179ccfb1e23a, regionState=OPEN, openSeqNum=2, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:08:08,493 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,1,1732381688125.fccbf8c10e348af89c4f1d21eb43cdb2. 2024-11-23T17:08:08,493 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,1,1732381688125.fccbf8c10e348af89c4f1d21eb43cdb2. 2024-11-23T17:08:08,494 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=fccbf8c10e348af89c4f1d21eb43cdb2, regionState=OPEN, openSeqNum=2, regionLocation=1a7c5470b472,44711,1732381534872 2024-11-23T17:08:08,494 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=67, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0547da56e62a809fa0c7179ccfb1e23a, server=1a7c5470b472,37875,1732381534675 because future has completed 2024-11-23T17:08:08,495 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=66, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure fccbf8c10e348af89c4f1d21eb43cdb2, server=1a7c5470b472,44711,1732381534872 because future has completed 2024-11-23T17:08:08,497 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46159 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=1a7c5470b472,44711,1732381534872, table=testtb-testExportWithResetTtl, region=fccbf8c10e348af89c4f1d21eb43cdb2. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-11-23T17:08:08,498 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=67, resume processing ppid=64 2024-11-23T17:08:08,498 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=67, ppid=64, state=SUCCESS, hasLock=false; OpenRegionProcedure 0547da56e62a809fa0c7179ccfb1e23a, server=1a7c5470b472,37875,1732381534675 in 176 msec 2024-11-23T17:08:08,499 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=66, resume processing ppid=65 2024-11-23T17:08:08,499 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=66, ppid=65, state=SUCCESS, hasLock=false; OpenRegionProcedure fccbf8c10e348af89c4f1d21eb43cdb2, server=1a7c5470b472,44711,1732381534872 in 179 msec 2024-11-23T17:08:08,500 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=64, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=0547da56e62a809fa0c7179ccfb1e23a, ASSIGN in 335 msec 2024-11-23T17:08:08,501 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=65, resume processing ppid=63 2024-11-23T17:08:08,501 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=65, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=fccbf8c10e348af89c4f1d21eb43cdb2, ASSIGN in 336 msec 2024-11-23T17:08:08,501 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T17:08:08,502 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381688501"}]},"ts":"1732381688501"} 2024-11-23T17:08:08,503 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-11-23T17:08:08,504 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T17:08:08,504 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-11-23T17:08:08,507 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37875 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-23T17:08:08,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:08:08,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:08:08,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:08:08,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:08:08,512 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T17:08:08,512 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T17:08:08,512 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T17:08:08,513 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T17:08:08,514 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=63, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl in 387 msec 2024-11-23T17:08:08,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-23T17:08:08,758 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-23T17:08:08,758 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T17:08:08,761 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-11-23T17:08:08,761 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1732381688125.0547da56e62a809fa0c7179ccfb1e23a. 
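The CREATE operation reported complete above is the server-side CreateTableProcedure for testtb-testExportWithResetTtl: a two-region table (split at row key '1') whose single family 'cf' is MOB-enabled with MOB_THRESHOLD => '0', as shown in the descriptor printed earlier. A minimal client-side sketch that would request an equivalent table, assuming the standard HBase 3.x Admin/TableDescriptorBuilder API (connection setup and configuration are illustrative, not taken from this log):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();  // illustrative: uses whatever cluster hbase-site.xml points at
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
      // Family settings mirror the descriptor printed in the log: one version,
      // ROW bloom filter, 64 KB blocks, MOB enabled with threshold 0.
      TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .setBloomFilterType(BloomType.ROW)
              .setBlocksize(64 * 1024)
              .setMobEnabled(true)
              .setMobThreshold(0L)
              .build());
      // One split key "1" yields the two regions seen above: ['', '1') and ['1', '').
      admin.createTable(td.build(), new byte[][] { Bytes.toBytes("1") });
    }
  }
}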
2024-11-23T17:08:08,761 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:08:08,763 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T17:08:08,767 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T17:08:08,772 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:08:08,774 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51586, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:08:08,776 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T17:08:08,779 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-23T17:08:08,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732381688779 (current time:1732381688779). 2024-11-23T17:08:08,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:08:08,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-23T17:08:08,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:08:08,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6522267f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:08,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:08:08,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:08:08,782 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:08:08,782 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:08:08,782 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: 
"67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:08:08,782 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1701b7e0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:08,783 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:08:08,783 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:08:08,783 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:08,784 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45344, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:08:08,784 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bf0b3cd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:08,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:08:08,786 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:08:08,786 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:08:08,787 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52504, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:08:08,788 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159. 
2024-11-23T17:08:08,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:08:08,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:08,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:08,788 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:08:08,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7787f15a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:08,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:08:08,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:08:08,790 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:08:08,790 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:08:08,790 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:08:08,790 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5634cc90, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:08,790 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:08:08,790 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:08:08,791 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:08,791 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45356, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:08:08,792 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@324f1975, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:08,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:08:08,793 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:08:08,793 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:08:08,794 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52506, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:08:08,796 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce., hostname=1a7c5470b472,37875,1732381534675, seqNum=2] 2024-11-23T17:08:08,797 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159. 
2024-11-23T17:08:08,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:08:08,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:08,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:08,798 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:08:08,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-23T17:08:08,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
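The master is validating and then registering a SnapshotProcedure for emptySnaptb0-testExportWithResetTtl (type=FLUSH, ttl=0), including reading the table's ACL so it can be written into the snapshot description. On the client side such a request is normally issued through Admin.snapshot; a minimal sketch, assuming the same HBase 3.x client API (snapshot and table names taken from the log, everything else illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // FLUSH-type snapshot: online regions are flushed (here they are still empty,
      // so the manifest only records region info), and the call returns once the
      // master-side SnapshotProcedure reports completion.
      admin.snapshot("emptySnaptb0-testExportWithResetTtl",
          TableName.valueOf("testtb-testExportWithResetTtl"),
          SnapshotType.FLUSH);
    }
  }
}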
2024-11-23T17:08:08,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-23T17:08:08,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-11-23T17:08:08,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-11-23T17:08:08,801 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:08:08,802 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:08:08,804 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:08:08,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741971_1147 (size=161) 2024-11-23T17:08:08,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741971_1147 (size=161) 2024-11-23T17:08:08,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741971_1147 (size=161) 2024-11-23T17:08:08,813 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:08:08,813 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0547da56e62a809fa0c7179ccfb1e23a}, {pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fccbf8c10e348af89c4f1d21eb43cdb2}] 2024-11-23T17:08:08,814 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0547da56e62a809fa0c7179ccfb1e23a 2024-11-23T17:08:08,814 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fccbf8c10e348af89c4f1d21eb43cdb2 2024-11-23T17:08:08,908 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-11-23T17:08:08,966 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37875 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=69 2024-11-23T17:08:08,966 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44711 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=70 2024-11-23T17:08:08,967 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1732381688125.0547da56e62a809fa0c7179ccfb1e23a. 2024-11-23T17:08:08,967 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1732381688125.fccbf8c10e348af89c4f1d21eb43cdb2. 2024-11-23T17:08:08,967 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.HRegion(2603): Flush status journal for 0547da56e62a809fa0c7179ccfb1e23a: 2024-11-23T17:08:08,967 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1732381688125.0547da56e62a809fa0c7179ccfb1e23a. for emptySnaptb0-testExportWithResetTtl completed. 2024-11-23T17:08:08,967 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.HRegion(2603): Flush status journal for fccbf8c10e348af89c4f1d21eb43cdb2: 2024-11-23T17:08:08,967 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1732381688125.fccbf8c10e348af89c4f1d21eb43cdb2. for emptySnaptb0-testExportWithResetTtl completed. 2024-11-23T17:08:08,967 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1732381688125.0547da56e62a809fa0c7179ccfb1e23a.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-11-23T17:08:08,967 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1732381688125.fccbf8c10e348af89c4f1d21eb43cdb2.' 
region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-11-23T17:08:08,967 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:08:08,967 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:08:08,967 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:08:08,967 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:08:08,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741973_1149 (size=68) 2024-11-23T17:08:08,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741973_1149 (size=68) 2024-11-23T17:08:08,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741972_1148 (size=68) 2024-11-23T17:08:08,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741972_1148 (size=68) 2024-11-23T17:08:08,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741973_1149 (size=68) 2024-11-23T17:08:08,977 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1732381688125.fccbf8c10e348af89c4f1d21eb43cdb2. 2024-11-23T17:08:08,977 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-11-23T17:08:08,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741972_1148 (size=68) 2024-11-23T17:08:08,978 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1732381688125.0547da56e62a809fa0c7179ccfb1e23a. 
2024-11-23T17:08:08,978 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=69 2024-11-23T17:08:08,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=70 2024-11-23T17:08:08,978 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region fccbf8c10e348af89c4f1d21eb43cdb2 2024-11-23T17:08:08,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=69 2024-11-23T17:08:08,978 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 0547da56e62a809fa0c7179ccfb1e23a 2024-11-23T17:08:08,978 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fccbf8c10e348af89c4f1d21eb43cdb2 2024-11-23T17:08:08,979 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0547da56e62a809fa0c7179ccfb1e23a 2024-11-23T17:08:08,981 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=70, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure fccbf8c10e348af89c4f1d21eb43cdb2 in 166 msec 2024-11-23T17:08:08,982 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=69, resume processing ppid=68 2024-11-23T17:08:08,982 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=69, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0547da56e62a809fa0c7179ccfb1e23a in 166 msec 2024-11-23T17:08:08,982 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:08:08,982 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:08:08,983 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-23T17:08:08,983 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-23T17:08:08,984 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:08:08,984 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-23T17:08:08,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741974_1150 (size=60) 2024-11-23T17:08:08,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741974_1150 (size=60) 2024-11-23T17:08:08,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741974_1150 (size=60) 2024-11-23T17:08:08,993 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:08:08,993 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-11-23T17:08:08,993 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-11-23T17:08:09,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741975_1151 (size=641) 2024-11-23T17:08:09,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741975_1151 (size=641) 2024-11-23T17:08:09,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741975_1151 (size=641) 2024-11-23T17:08:09,011 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:08:09,016 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:08:09,017 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-11-23T17:08:09,018 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:08:09,018 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-11-23T17:08:09,019 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=68, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 219 msec 2024-11-23T17:08:09,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-11-23T17:08:09,119 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-23T17:08:09,127 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37875 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,,1732381688125.0547da56e62a809fa0c7179ccfb1e23a. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:08:09,130 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44711 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,1,1732381688125.fccbf8c10e348af89c4f1d21eb43cdb2. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:08:09,132 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T17:08:09,135 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-11-23T17:08:09,136 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1732381688125.0547da56e62a809fa0c7179ccfb1e23a. 
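The "writing data to region ... with WAL disabled" warnings above are emitted when a client issues puts with WAL durability turned off. A minimal client-side sketch that would trigger the same warning; the table and column family names are taken from the log, while the row key and value are made-up placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testtb-testExportWithResetTtl"))) {
      // Skipping the WAL is what produces the "Data may be lost in the event
      // of a crash" warning above; it trades durability for write speed.
      Put put = new Put(Bytes.toBytes("row-0"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-0"));
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}

Skipping the WAL speeds up loading test data, but as the warning notes, such edits are lost if the region server crashes before the memstore is flushed.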
2024-11-23T17:08:09,136 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:08:09,138 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T17:08:09,144 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T17:08:09,151 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T17:08:09,155 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-23T17:08:09,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732381689155 (current time:1732381689155). 2024-11-23T17:08:09,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:08:09,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-23T17:08:09,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:08:09,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3efcdc53, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:09,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:08:09,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:08:09,157 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:08:09,157 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:08:09,157 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:08:09,157 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75ccbfb4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
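The master is here handling a client snapshot request of type FLUSH for snaptb0-testExportWithResetTtl. A hedged sketch of how such a request could be issued through the public Admin API; the snapshot and table names come from the log, while configuration and connection details are assumed:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class FlushSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Requests a FLUSH-type snapshot; the master then drives the
      // SnapshotProcedure states (SNAPSHOT_PREPARE ... SNAPSHOT_POST_OPERATION)
      // traced in the surrounding log entries.
      admin.snapshot(new SnapshotDescription(
          "snaptb0-testExportWithResetTtl",
          TableName.valueOf("testtb-testExportWithResetTtl"),
          SnapshotType.FLUSH));
    }
  }
}

The repeated "Checking to see if procedure is done pid=71" entries that follow are the client polling the master until the SnapshotProcedure finishes.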
2024-11-23T17:08:09,157 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:08:09,158 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:08:09,158 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:09,158 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45384, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:08:09,159 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a557e16, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:09,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:08:09,160 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:08:09,161 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:08:09,161 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52512, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:08:09,162 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159. 
2024-11-23T17:08:09,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:08:09,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:09,163 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:08:09,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:09,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64084e23, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:09,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:08:09,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:08:09,165 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:08:09,165 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:08:09,165 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:08:09,165 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ef7a84, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:09,165 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:08:09,165 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:08:09,166 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:09,166 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45402, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:08:09,167 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77a45507, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:09,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:08:09,168 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:08:09,169 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:08:09,169 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52526, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:08:09,171 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce., hostname=1a7c5470b472,37875,1732381534675, seqNum=2] 2024-11-23T17:08:09,173 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159. 
2024-11-23T17:08:09,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:08:09,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:09,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:09,174 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:08:09,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-23T17:08:09,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
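Before registering the snapshot procedure, the master reads the table's ACL entry ([jenkins: RWXCA]) so the grants can be written into the snapshot description. A rough sketch of inspecting the same grants from a client, assuming the AccessController coprocessor is enabled; the table name comes from the log, everything else is illustrative:

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class ReadTableAclExample {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // Lists the hbase:acl entries matching the table; the log above shows
      // the master reading the same entry while validating the snapshot.
      List<UserPermission> perms =
          AccessControlClient.getUserPermissions(conn, "testtb-testExportWithResetTtl");
      perms.forEach(p -> System.out.println(p));
    }
  }
}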
2024-11-23T17:08:09,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-23T17:08:09,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-11-23T17:08:09,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-11-23T17:08:09,177 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:08:09,178 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:08:09,180 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:08:09,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741976_1152 (size=156) 2024-11-23T17:08:09,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741976_1152 (size=156) 2024-11-23T17:08:09,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741976_1152 (size=156) 2024-11-23T17:08:09,197 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:08:09,198 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0547da56e62a809fa0c7179ccfb1e23a}, {pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fccbf8c10e348af89c4f1d21eb43cdb2}] 2024-11-23T17:08:09,199 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0547da56e62a809fa0c7179ccfb1e23a 2024-11-23T17:08:09,199 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fccbf8c10e348af89c4f1d21eb43cdb2 2024-11-23T17:08:09,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-11-23T17:08:09,356 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37875 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=72 2024-11-23T17:08:09,356 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44711 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=73 2024-11-23T17:08:09,356 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1732381688125.0547da56e62a809fa0c7179ccfb1e23a. 2024-11-23T17:08:09,357 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2902): Flushing 0547da56e62a809fa0c7179ccfb1e23a 1/1 column families, dataSize=266 B heapSize=832 B 2024-11-23T17:08:09,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1732381688125.fccbf8c10e348af89c4f1d21eb43cdb2. 2024-11-23T17:08:09,359 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2902): Flushing fccbf8c10e348af89c4f1d21eb43cdb2 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-11-23T17:08:09,379 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411235c4bf73c4b8a4edc8b4d12d8ffe6aea4_0547da56e62a809fa0c7179ccfb1e23a is 71, key is 057a086bee29e4a67dff2310d1aae42d/cf:q/1732381689127/Put/seqid=0 2024-11-23T17:08:09,381 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411232d3d14e62bf74b29b57e38ed1694ab97_fccbf8c10e348af89c4f1d21eb43cdb2 is 71, key is 19763248741c79c9f771c9956ec628f2/cf:q/1732381689129/Put/seqid=0 2024-11-23T17:08:09,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741978_1154 (size=8101) 2024-11-23T17:08:09,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741978_1154 (size=8101) 2024-11-23T17:08:09,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741978_1154 (size=8101) 2024-11-23T17:08:09,403 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:08:09,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741977_1153 (size=5172) 2024-11-23T17:08:09,418 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741977_1153 (size=5172) 2024-11-23T17:08:09,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741977_1153 (size=5172) 2024-11-23T17:08:09,419 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:08:09,426 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411235c4bf73c4b8a4edc8b4d12d8ffe6aea4_0547da56e62a809fa0c7179ccfb1e23a to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e202411235c4bf73c4b8a4edc8b4d12d8ffe6aea4_0547da56e62a809fa0c7179ccfb1e23a 2024-11-23T17:08:09,427 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/0547da56e62a809fa0c7179ccfb1e23a/.tmp/cf/196576713e0f4138b45c0677aae92757, store: [table=testtb-testExportWithResetTtl family=cf region=0547da56e62a809fa0c7179ccfb1e23a] 2024-11-23T17:08:09,428 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411232d3d14e62bf74b29b57e38ed1694ab97_fccbf8c10e348af89c4f1d21eb43cdb2 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b202411232d3d14e62bf74b29b57e38ed1694ab97_fccbf8c10e348af89c4f1d21eb43cdb2 2024-11-23T17:08:09,428 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/0547da56e62a809fa0c7179ccfb1e23a/.tmp/cf/196576713e0f4138b45c0677aae92757 is 206, key is 0277f28a4912f1869255d44036a7c50b1/cf:q/1732381689127/Put/seqid=0 2024-11-23T17:08:09,429 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/fccbf8c10e348af89c4f1d21eb43cdb2/.tmp/cf/a56f83debde94846a8352e1fe27f0d33, store: [table=testtb-testExportWithResetTtl family=cf region=fccbf8c10e348af89c4f1d21eb43cdb2] 2024-11-23T17:08:09,430 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/fccbf8c10e348af89c4f1d21eb43cdb2/.tmp/cf/a56f83debde94846a8352e1fe27f0d33 is 206, key is 1aa154cd66f4191655e5f067556f387d2/cf:q/1732381689129/Put/seqid=0 2024-11-23T17:08:09,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741979_1155 (size=6108) 2024-11-23T17:08:09,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741979_1155 (size=6108) 2024-11-23T17:08:09,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741979_1155 (size=6108) 2024-11-23T17:08:09,448 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=266, hasBloomFilter=true, into tmp file hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/0547da56e62a809fa0c7179ccfb1e23a/.tmp/cf/196576713e0f4138b45c0677aae92757 2024-11-23T17:08:09,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741980_1156 (size=14651) 2024-11-23T17:08:09,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741980_1156 (size=14651) 2024-11-23T17:08:09,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741980_1156 (size=14651) 2024-11-23T17:08:09,453 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/fccbf8c10e348af89c4f1d21eb43cdb2/.tmp/cf/a56f83debde94846a8352e1fe27f0d33 2024-11-23T17:08:09,456 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/0547da56e62a809fa0c7179ccfb1e23a/.tmp/cf/196576713e0f4138b45c0677aae92757 as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/0547da56e62a809fa0c7179ccfb1e23a/cf/196576713e0f4138b45c0677aae92757 2024-11-23T17:08:09,460 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/fccbf8c10e348af89c4f1d21eb43cdb2/.tmp/cf/a56f83debde94846a8352e1fe27f0d33 as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/fccbf8c10e348af89c4f1d21eb43cdb2/cf/a56f83debde94846a8352e1fe27f0d33 2024-11-23T17:08:09,465 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/0547da56e62a809fa0c7179ccfb1e23a/cf/196576713e0f4138b45c0677aae92757, entries=4, sequenceid=6, filesize=6.0 K 2024-11-23T17:08:09,466 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 0547da56e62a809fa0c7179ccfb1e23a in 109ms, sequenceid=6, compaction requested=false 2024-11-23T17:08:09,466 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/fccbf8c10e348af89c4f1d21eb43cdb2/cf/a56f83debde94846a8352e1fe27f0d33, entries=46, sequenceid=6, filesize=14.3 K 2024-11-23T17:08:09,466 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-11-23T17:08:09,467 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2603): Flush status journal for 0547da56e62a809fa0c7179ccfb1e23a: 2024-11-23T17:08:09,467 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1732381688125.0547da56e62a809fa0c7179ccfb1e23a. for snaptb0-testExportWithResetTtl completed. 2024-11-23T17:08:09,467 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1732381688125.0547da56e62a809fa0c7179ccfb1e23a.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-11-23T17:08:09,467 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:08:09,467 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/0547da56e62a809fa0c7179ccfb1e23a/cf/196576713e0f4138b45c0677aae92757] hfiles 2024-11-23T17:08:09,467 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for fccbf8c10e348af89c4f1d21eb43cdb2 in 108ms, sequenceid=6, compaction requested=false 2024-11-23T17:08:09,467 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/0547da56e62a809fa0c7179ccfb1e23a/cf/196576713e0f4138b45c0677aae92757 for snapshot=snaptb0-testExportWithResetTtl 2024-11-23T17:08:09,467 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2603): Flush status journal for fccbf8c10e348af89c4f1d21eb43cdb2: 2024-11-23T17:08:09,467 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1732381688125.fccbf8c10e348af89c4f1d21eb43cdb2. for snaptb0-testExportWithResetTtl completed. 2024-11-23T17:08:09,468 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1732381688125.fccbf8c10e348af89c4f1d21eb43cdb2.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-11-23T17:08:09,468 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:08:09,468 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/fccbf8c10e348af89c4f1d21eb43cdb2/cf/a56f83debde94846a8352e1fe27f0d33] hfiles 2024-11-23T17:08:09,468 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/fccbf8c10e348af89c4f1d21eb43cdb2/cf/a56f83debde94846a8352e1fe27f0d33 for snapshot=snaptb0-testExportWithResetTtl 2024-11-23T17:08:09,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741981_1157 (size=107) 2024-11-23T17:08:09,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741981_1157 (size=107) 2024-11-23T17:08:09,484 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1732381688125.0547da56e62a809fa0c7179ccfb1e23a. 2024-11-23T17:08:09,484 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-11-23T17:08:09,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741981_1157 (size=107) 2024-11-23T17:08:09,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=72 2024-11-23T17:08:09,488 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 0547da56e62a809fa0c7179ccfb1e23a 2024-11-23T17:08:09,488 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0547da56e62a809fa0c7179ccfb1e23a 2024-11-23T17:08:09,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741982_1158 (size=107) 2024-11-23T17:08:09,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741982_1158 (size=107) 2024-11-23T17:08:09,494 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1732381688125.fccbf8c10e348af89c4f1d21eb43cdb2. 
2024-11-23T17:08:09,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741982_1158 (size=107) 2024-11-23T17:08:09,495 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=73 2024-11-23T17:08:09,495 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=72, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0547da56e62a809fa0c7179ccfb1e23a in 292 msec 2024-11-23T17:08:09,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=73 2024-11-23T17:08:09,495 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region fccbf8c10e348af89c4f1d21eb43cdb2 2024-11-23T17:08:09,495 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fccbf8c10e348af89c4f1d21eb43cdb2 2024-11-23T17:08:09,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-11-23T17:08:09,499 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=73, resume processing ppid=71 2024-11-23T17:08:09,499 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=73, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure fccbf8c10e348af89c4f1d21eb43cdb2 in 299 msec 2024-11-23T17:08:09,499 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:08:09,500 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:08:09,501 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-23T17:08:09,501 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-23T17:08:09,501 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:08:09,503 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b202411232d3d14e62bf74b29b57e38ed1694ab97_fccbf8c10e348af89c4f1d21eb43cdb2, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e202411235c4bf73c4b8a4edc8b4d12d8ffe6aea4_0547da56e62a809fa0c7179ccfb1e23a] hfiles 2024-11-23T17:08:09,504 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b202411232d3d14e62bf74b29b57e38ed1694ab97_fccbf8c10e348af89c4f1d21eb43cdb2 2024-11-23T17:08:09,504 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e202411235c4bf73c4b8a4edc8b4d12d8ffe6aea4_0547da56e62a809fa0c7179ccfb1e23a 2024-11-23T17:08:09,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741983_1159 (size=291) 2024-11-23T17:08:09,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741983_1159 (size=291) 2024-11-23T17:08:09,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741983_1159 (size=291) 2024-11-23T17:08:09,521 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:08:09,521 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-11-23T17:08:09,522 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-11-23T17:08:09,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741984_1160 (size=951) 2024-11-23T17:08:09,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741984_1160 (size=951) 2024-11-23T17:08:09,538 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741984_1160 (size=951) 2024-11-23T17:08:09,542 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:08:09,549 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:08:09,550 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-11-23T17:08:09,551 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:08:09,551 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-11-23T17:08:09,553 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=71, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 376 msec 2024-11-23T17:08:09,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-11-23T17:08:09,808 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-23T17:08:09,809 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T17:08:09,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportWithResetTtl 2024-11-23T17:08:09,811 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl 
execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T17:08:09,811 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportWithResetTtl" procId is: 74 2024-11-23T17:08:09,812 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T17:08:09,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-11-23T17:08:09,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741985_1161 (size=433) 2024-11-23T17:08:09,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741985_1161 (size=433) 2024-11-23T17:08:09,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741985_1161 (size=433) 2024-11-23T17:08:09,821 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 28a8da401eb8b48ea494978295421db9, NAME => 'testExportWithResetTtl,,1732381689809.28a8da401eb8b48ea494978295421db9.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:08:09,822 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => ca3ad0775b5f827a33948d3523056bca, NAME => 'testExportWithResetTtl,1,1732381689809.ca3ad0775b5f827a33948d3523056bca.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:08:09,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741986_1162 (size=58) 2024-11-23T17:08:09,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741986_1162 (size=58) 2024-11-23T17:08:09,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741987_1163 (size=58) 2024-11-23T17:08:09,829 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741986_1162 (size=58) 2024-11-23T17:08:09,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741987_1163 (size=58) 2024-11-23T17:08:09,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741987_1163 (size=58) 2024-11-23T17:08:09,830 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1732381689809.ca3ad0775b5f827a33948d3523056bca.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:08:09,830 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing ca3ad0775b5f827a33948d3523056bca, disabling compactions & flushes 2024-11-23T17:08:09,830 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1732381689809.ca3ad0775b5f827a33948d3523056bca. 2024-11-23T17:08:09,830 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1732381689809.ca3ad0775b5f827a33948d3523056bca. 2024-11-23T17:08:09,830 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1732381689809.ca3ad0775b5f827a33948d3523056bca. after waiting 0 ms 2024-11-23T17:08:09,830 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1732381689809.ca3ad0775b5f827a33948d3523056bca. 2024-11-23T17:08:09,830 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1732381689809.ca3ad0775b5f827a33948d3523056bca. 2024-11-23T17:08:09,830 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for ca3ad0775b5f827a33948d3523056bca: Waiting for close lock at 1732381689830Disabling compacts and flushes for region at 1732381689830Disabling writes for close at 1732381689830Writing region close event to WAL at 1732381689830Closed at 1732381689830 2024-11-23T17:08:09,830 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1732381689809.28a8da401eb8b48ea494978295421db9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:08:09,830 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing 28a8da401eb8b48ea494978295421db9, disabling compactions & flushes 2024-11-23T17:08:09,830 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1732381689809.28a8da401eb8b48ea494978295421db9. 2024-11-23T17:08:09,830 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1732381689809.28a8da401eb8b48ea494978295421db9. 2024-11-23T17:08:09,830 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1732381689809.28a8da401eb8b48ea494978295421db9. 
after waiting 0 ms 2024-11-23T17:08:09,830 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1732381689809.28a8da401eb8b48ea494978295421db9. 2024-11-23T17:08:09,830 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1732381689809.28a8da401eb8b48ea494978295421db9. 2024-11-23T17:08:09,831 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 28a8da401eb8b48ea494978295421db9: Waiting for close lock at 1732381689830Disabling compacts and flushes for region at 1732381689830Disabling writes for close at 1732381689830Writing region close event to WAL at 1732381689830Closed at 1732381689830 2024-11-23T17:08:09,832 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T17:08:09,832 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1732381689809.ca3ad0775b5f827a33948d3523056bca.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1732381689832"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732381689832"}]},"ts":"1732381689832"} 2024-11-23T17:08:09,832 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1732381689809.28a8da401eb8b48ea494978295421db9.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1732381689832"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732381689832"}]},"ts":"1732381689832"} 2024-11-23T17:08:09,835 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-11-23T17:08:09,835 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T17:08:09,836 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381689835"}]},"ts":"1732381689835"} 2024-11-23T17:08:09,837 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-11-23T17:08:09,837 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {1a7c5470b472=0} racks are {/default-rack=0} 2024-11-23T17:08:09,838 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T17:08:09,838 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T17:08:09,838 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T17:08:09,838 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T17:08:09,838 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T17:08:09,838 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T17:08:09,838 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T17:08:09,838 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T17:08:09,838 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T17:08:09,838 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T17:08:09,839 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=28a8da401eb8b48ea494978295421db9, ASSIGN}, {pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ca3ad0775b5f827a33948d3523056bca, ASSIGN}] 2024-11-23T17:08:09,840 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ca3ad0775b5f827a33948d3523056bca, ASSIGN 2024-11-23T17:08:09,840 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=28a8da401eb8b48ea494978295421db9, ASSIGN 2024-11-23T17:08:09,840 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=ca3ad0775b5f827a33948d3523056bca, ASSIGN; state=OFFLINE, location=1a7c5470b472,37875,1732381534675; forceNewPlan=false, retain=false 2024-11-23T17:08:09,840 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; 
TransitRegionStateProcedure table=testExportWithResetTtl, region=28a8da401eb8b48ea494978295421db9, ASSIGN; state=OFFLINE, location=1a7c5470b472,43453,1732381534825; forceNewPlan=false, retain=false 2024-11-23T17:08:09,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-11-23T17:08:09,991 INFO [1a7c5470b472:46159 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-23T17:08:09,991 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=28a8da401eb8b48ea494978295421db9, regionState=OPENING, regionLocation=1a7c5470b472,43453,1732381534825 2024-11-23T17:08:09,991 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=ca3ad0775b5f827a33948d3523056bca, regionState=OPENING, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:08:09,993 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=ca3ad0775b5f827a33948d3523056bca, ASSIGN because future has completed 2024-11-23T17:08:09,993 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=77, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure ca3ad0775b5f827a33948d3523056bca, server=1a7c5470b472,37875,1732381534675}] 2024-11-23T17:08:09,994 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=28a8da401eb8b48ea494978295421db9, ASSIGN because future has completed 2024-11-23T17:08:09,994 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=78, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure 28a8da401eb8b48ea494978295421db9, server=1a7c5470b472,43453,1732381534825}] 2024-11-23T17:08:10,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-11-23T17:08:10,149 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,1,1732381689809.ca3ad0775b5f827a33948d3523056bca. 2024-11-23T17:08:10,149 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7752): Opening region: {ENCODED => ca3ad0775b5f827a33948d3523056bca, NAME => 'testExportWithResetTtl,1,1732381689809.ca3ad0775b5f827a33948d3523056bca.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T17:08:10,150 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,,1732381689809.28a8da401eb8b48ea494978295421db9. 2024-11-23T17:08:10,150 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,1,1732381689809.ca3ad0775b5f827a33948d3523056bca. 
service=AccessControlService 2024-11-23T17:08:10,150 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7752): Opening region: {ENCODED => 28a8da401eb8b48ea494978295421db9, NAME => 'testExportWithResetTtl,,1732381689809.28a8da401eb8b48ea494978295421db9.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T17:08:10,150 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:08:10,150 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl ca3ad0775b5f827a33948d3523056bca 2024-11-23T17:08:10,150 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1732381689809.ca3ad0775b5f827a33948d3523056bca.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:08:10,150 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7794): checking encryption for ca3ad0775b5f827a33948d3523056bca 2024-11-23T17:08:10,150 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,,1732381689809.28a8da401eb8b48ea494978295421db9. service=AccessControlService 2024-11-23T17:08:10,150 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7797): checking classloading for ca3ad0775b5f827a33948d3523056bca 2024-11-23T17:08:10,151 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T17:08:10,151 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 28a8da401eb8b48ea494978295421db9 2024-11-23T17:08:10,151 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1732381689809.28a8da401eb8b48ea494978295421db9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:08:10,151 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7794): checking encryption for 28a8da401eb8b48ea494978295421db9 2024-11-23T17:08:10,151 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7797): checking classloading for 28a8da401eb8b48ea494978295421db9 2024-11-23T17:08:10,152 INFO [StoreOpener-ca3ad0775b5f827a33948d3523056bca-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ca3ad0775b5f827a33948d3523056bca 2024-11-23T17:08:10,154 INFO [StoreOpener-ca3ad0775b5f827a33948d3523056bca-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ca3ad0775b5f827a33948d3523056bca columnFamilyName cf 2024-11-23T17:08:10,155 DEBUG [StoreOpener-ca3ad0775b5f827a33948d3523056bca-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:08:10,156 INFO [StoreOpener-28a8da401eb8b48ea494978295421db9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 28a8da401eb8b48ea494978295421db9 2024-11-23T17:08:10,156 INFO [StoreOpener-ca3ad0775b5f827a33948d3523056bca-1 {}] regionserver.HStore(327): Store=ca3ad0775b5f827a33948d3523056bca/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:08:10,157 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1038): replaying wal for ca3ad0775b5f827a33948d3523056bca 2024-11-23T17:08:10,157 INFO [StoreOpener-28a8da401eb8b48ea494978295421db9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 28a8da401eb8b48ea494978295421db9 columnFamilyName cf 2024-11-23T17:08:10,158 DEBUG [StoreOpener-28a8da401eb8b48ea494978295421db9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:08:10,158 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/ca3ad0775b5f827a33948d3523056bca 2024-11-23T17:08:10,158 INFO [StoreOpener-28a8da401eb8b48ea494978295421db9-1 {}] regionserver.HStore(327): Store=28a8da401eb8b48ea494978295421db9/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:08:10,158 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/ca3ad0775b5f827a33948d3523056bca 2024-11-23T17:08:10,159 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1038): replaying wal for 28a8da401eb8b48ea494978295421db9 2024-11-23T17:08:10,159 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1048): stopping wal replay for ca3ad0775b5f827a33948d3523056bca 2024-11-23T17:08:10,159 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1060): Cleaning up temporary data for ca3ad0775b5f827a33948d3523056bca 2024-11-23T17:08:10,160 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/28a8da401eb8b48ea494978295421db9 2024-11-23T17:08:10,160 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/28a8da401eb8b48ea494978295421db9 2024-11-23T17:08:10,161 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1048): stopping wal replay for 28a8da401eb8b48ea494978295421db9 2024-11-23T17:08:10,161 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1060): Cleaning up temporary data for 28a8da401eb8b48ea494978295421db9 2024-11-23T17:08:10,161 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1093): writing seq id for 
ca3ad0775b5f827a33948d3523056bca 2024-11-23T17:08:10,163 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1093): writing seq id for 28a8da401eb8b48ea494978295421db9 2024-11-23T17:08:10,164 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/ca3ad0775b5f827a33948d3523056bca/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:08:10,164 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1114): Opened ca3ad0775b5f827a33948d3523056bca; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74210067, jitterRate=0.10581617057323456}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:08:10,164 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ca3ad0775b5f827a33948d3523056bca 2024-11-23T17:08:10,165 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1006): Region open journal for ca3ad0775b5f827a33948d3523056bca: Running coprocessor pre-open hook at 1732381690151Writing region info on filesystem at 1732381690151Initializing all the Stores at 1732381690152 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381690152Cleaning up temporary data from old regions at 1732381690159 (+7 ms)Running coprocessor post-open hooks at 1732381690164 (+5 ms)Region opened successfully at 1732381690165 (+1 ms) 2024-11-23T17:08:10,165 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/28a8da401eb8b48ea494978295421db9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:08:10,166 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1114): Opened 28a8da401eb8b48ea494978295421db9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67184038, jitterRate=0.0011201798915863037}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:08:10,166 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,1,1732381689809.ca3ad0775b5f827a33948d3523056bca., pid=77, masterSystemTime=1732381690145 2024-11-23T17:08:10,166 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 28a8da401eb8b48ea494978295421db9 2024-11-23T17:08:10,166 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 
{event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1006): Region open journal for 28a8da401eb8b48ea494978295421db9: Running coprocessor pre-open hook at 1732381690151Writing region info on filesystem at 1732381690151Initializing all the Stores at 1732381690152 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381690152Cleaning up temporary data from old regions at 1732381690161 (+9 ms)Running coprocessor post-open hooks at 1732381690166 (+5 ms)Region opened successfully at 1732381690166 2024-11-23T17:08:10,168 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,,1732381689809.28a8da401eb8b48ea494978295421db9., pid=78, masterSystemTime=1732381690146 2024-11-23T17:08:10,169 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,1,1732381689809.ca3ad0775b5f827a33948d3523056bca. 2024-11-23T17:08:10,169 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,1,1732381689809.ca3ad0775b5f827a33948d3523056bca. 2024-11-23T17:08:10,170 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=ca3ad0775b5f827a33948d3523056bca, regionState=OPEN, openSeqNum=2, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:08:10,170 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=28a8da401eb8b48ea494978295421db9, regionState=OPEN, openSeqNum=2, regionLocation=1a7c5470b472,43453,1732381534825 2024-11-23T17:08:10,171 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,,1732381689809.28a8da401eb8b48ea494978295421db9. 2024-11-23T17:08:10,171 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,,1732381689809.28a8da401eb8b48ea494978295421db9. 
2024-11-23T17:08:10,172 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=77, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure ca3ad0775b5f827a33948d3523056bca, server=1a7c5470b472,37875,1732381534675 because future has completed 2024-11-23T17:08:10,173 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=78, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure 28a8da401eb8b48ea494978295421db9, server=1a7c5470b472,43453,1732381534825 because future has completed 2024-11-23T17:08:10,175 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=77, resume processing ppid=76 2024-11-23T17:08:10,175 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=77, ppid=76, state=SUCCESS, hasLock=false; OpenRegionProcedure ca3ad0775b5f827a33948d3523056bca, server=1a7c5470b472,37875,1732381534675 in 180 msec 2024-11-23T17:08:10,177 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=78, resume processing ppid=75 2024-11-23T17:08:10,177 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=76, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ca3ad0775b5f827a33948d3523056bca, ASSIGN in 336 msec 2024-11-23T17:08:10,177 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=78, ppid=75, state=SUCCESS, hasLock=false; OpenRegionProcedure 28a8da401eb8b48ea494978295421db9, server=1a7c5470b472,43453,1732381534825 in 180 msec 2024-11-23T17:08:10,179 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=75, resume processing ppid=74 2024-11-23T17:08:10,179 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=75, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=28a8da401eb8b48ea494978295421db9, ASSIGN in 338 msec 2024-11-23T17:08:10,179 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T17:08:10,180 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381690179"}]},"ts":"1732381690179"} 2024-11-23T17:08:10,181 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-11-23T17:08:10,182 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T17:08:10,182 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-11-23T17:08:10,185 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37875 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-23T17:08:10,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:08:10,187 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:08:10,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:08:10,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:08:10,196 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T17:08:10,196 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T17:08:10,196 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T17:08:10,196 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T17:08:10,196 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T17:08:10,196 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T17:08:10,197 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T17:08:10,197 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T17:08:10,197 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=74, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportWithResetTtl in 386 msec 2024-11-23T17:08:10,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-11-23T17:08:10,438 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportWithResetTtl completed 
2024-11-23T17:08:10,438 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T17:08:10,441 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-11-23T17:08:10,441 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1732381689809.28a8da401eb8b48ea494978295421db9. 2024-11-23T17:08:10,441 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:08:10,443 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T17:08:10,449 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T17:08:10,454 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T17:08:10,462 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43453 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,,1732381689809.28a8da401eb8b48ea494978295421db9. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:08:10,464 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37875 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,1,1732381689809.ca3ad0775b5f827a33948d3523056bca. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:08:10,465 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T17:08:10,468 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-11-23T17:08:10,468 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1732381689809.28a8da401eb8b48ea494978295421db9. 
2024-11-23T17:08:10,468 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:08:10,469 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T17:08:10,474 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T17:08:10,479 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T17:08:10,482 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-11-23T17:08:10,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732381690482 (current time:1732381690482). 2024-11-23T17:08:10,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-23T17:08:10,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:08:10,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ea141d1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:10,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:08:10,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:08:10,484 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:08:10,484 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:08:10,484 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:08:10,484 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d3d3ed0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:10,484 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:08:10,484 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:08:10,485 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:10,485 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45418, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:08:10,486 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f276cd1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:10,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:08:10,487 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:08:10,487 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:08:10,488 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52536, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:08:10,489 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159. 
2024-11-23T17:08:10,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:08:10,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:10,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:10,490 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:08:10,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d42b8f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:10,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:08:10,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:08:10,492 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:08:10,492 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:08:10,492 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:08:10,492 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56490de5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:10,492 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:08:10,492 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:08:10,493 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:10,493 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45436, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:08:10,494 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45617955, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:10,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:08:10,496 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:08:10,496 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:08:10,497 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52542, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:08:10,499 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce., hostname=1a7c5470b472,37875,1732381534675, seqNum=2] 2024-11-23T17:08:10,501 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159. 
2024-11-23T17:08:10,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:08:10,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:10,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:10,501 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:08:10,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-23T17:08:10,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-23T17:08:10,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-11-23T17:08:10,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-11-23T17:08:10,504 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:08:10,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-11-23T17:08:10,505 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:08:10,507 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:08:10,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741988_1164 (size=143) 2024-11-23T17:08:10,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741988_1164 (size=143) 2024-11-23T17:08:10,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741988_1164 (size=143) 2024-11-23T17:08:10,516 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:08:10,516 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 28a8da401eb8b48ea494978295421db9}, {pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ca3ad0775b5f827a33948d3523056bca}] 2024-11-23T17:08:10,517 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ca3ad0775b5f827a33948d3523056bca 2024-11-23T17:08:10,517 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 28a8da401eb8b48ea494978295421db9 2024-11-23T17:08:10,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): 
Checking to see if procedure is done pid=79 2024-11-23T17:08:10,669 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43453 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=80 2024-11-23T17:08:10,669 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37875 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=81 2024-11-23T17:08:10,669 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1732381689809.ca3ad0775b5f827a33948d3523056bca. 2024-11-23T17:08:10,669 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1732381689809.28a8da401eb8b48ea494978295421db9. 2024-11-23T17:08:10,670 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2902): Flushing ca3ad0775b5f827a33948d3523056bca 1/1 column families, dataSize=2.87 KB heapSize=6.44 KB 2024-11-23T17:08:10,670 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2902): Flushing 28a8da401eb8b48ea494978295421db9 1/1 column families, dataSize=400 B heapSize=1.09 KB 2024-11-23T17:08:10,691 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123dce40466f53e4fe580653bb3f226a4dd_28a8da401eb8b48ea494978295421db9 is 71, key is 053676d987e56231cf2ac6f7d68b4e8a/cf:q/1732381690462/Put/seqid=0 2024-11-23T17:08:10,701 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241123039a05a55b744cb7b8b9ccab95b067e2_ca3ad0775b5f827a33948d3523056bca is 71, key is 16ddf055487e6c8e737999a33bc15536/cf:q/1732381690464/Put/seqid=0 2024-11-23T17:08:10,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741989_1165 (size=5312) 2024-11-23T17:08:10,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741989_1165 (size=5312) 2024-11-23T17:08:10,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741989_1165 (size=5312) 2024-11-23T17:08:10,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741990_1166 (size=7961) 2024-11-23T17:08:10,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741990_1166 (size=7961) 2024-11-23T17:08:10,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741990_1166 (size=7961) 2024-11-23T17:08:10,711 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:08:10,717 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241123039a05a55b744cb7b8b9ccab95b067e2_ca3ad0775b5f827a33948d3523056bca to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241123039a05a55b744cb7b8b9ccab95b067e2_ca3ad0775b5f827a33948d3523056bca 2024-11-23T17:08:10,718 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/ca3ad0775b5f827a33948d3523056bca/.tmp/cf/ad27bd63f36d4f009b183fd3f8552abf, store: [table=testExportWithResetTtl family=cf region=ca3ad0775b5f827a33948d3523056bca] 2024-11-23T17:08:10,719 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/ca3ad0775b5f827a33948d3523056bca/.tmp/cf/ad27bd63f36d4f009b183fd3f8552abf is 199, key is 1fcf93bffbfd054b21f0c99b6067af444/cf:q/1732381690464/Put/seqid=0 2024-11-23T17:08:10,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741991_1167 (size=13932) 2024-11-23T17:08:10,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741991_1167 (size=13932) 2024-11-23T17:08:10,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741991_1167 (size=13932) 2024-11-23T17:08:10,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-11-23T17:08:11,106 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:08:11,112 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123dce40466f53e4fe580653bb3f226a4dd_28a8da401eb8b48ea494978295421db9 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241123dce40466f53e4fe580653bb3f226a4dd_28a8da401eb8b48ea494978295421db9 2024-11-23T17:08:11,114 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/28a8da401eb8b48ea494978295421db9/.tmp/cf/9416de3887e3465981a5b35a359cb61f, store: [table=testExportWithResetTtl family=cf region=28a8da401eb8b48ea494978295421db9] 2024-11-23T17:08:11,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/28a8da401eb8b48ea494978295421db9/.tmp/cf/9416de3887e3465981a5b35a359cb61f is 199, key is 015264b7b58ed15b830d45f71ed69fbfd/cf:q/1732381690462/Put/seqid=0 2024-11-23T17:08:11,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741992_1168 (size=6463) 2024-11-23T17:08:11,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741992_1168 (size=6463) 2024-11-23T17:08:11,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741992_1168 (size=6463) 2024-11-23T17:08:11,127 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/ca3ad0775b5f827a33948d3523056bca/.tmp/cf/ad27bd63f36d4f009b183fd3f8552abf 2024-11-23T17:08:11,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-11-23T17:08:11,133 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/ca3ad0775b5f827a33948d3523056bca/.tmp/cf/ad27bd63f36d4f009b183fd3f8552abf as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/ca3ad0775b5f827a33948d3523056bca/cf/ad27bd63f36d4f009b183fd3f8552abf 2024-11-23T17:08:11,139 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/ca3ad0775b5f827a33948d3523056bca/cf/ad27bd63f36d4f009b183fd3f8552abf, entries=44, sequenceid=5, filesize=13.6 K 2024-11-23T17:08:11,140 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(3140): Finished flush of dataSize ~2.87 KB/2936, heapSize ~6.42 KB/6576, currentSize=0 B/0 for ca3ad0775b5f827a33948d3523056bca in 469ms, sequenceid=5, compaction requested=false 2024-11-23T17:08:11,140 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 
2024-11-23T17:08:11,140 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2603): Flush status journal for ca3ad0775b5f827a33948d3523056bca: 2024-11-23T17:08:11,140 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1732381689809.ca3ad0775b5f827a33948d3523056bca. for snaptb-testExportWithResetTtl completed. 2024-11-23T17:08:11,140 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1732381689809.ca3ad0775b5f827a33948d3523056bca.' region-info for snapshot=snaptb-testExportWithResetTtl 2024-11-23T17:08:11,140 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:08:11,140 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/ca3ad0775b5f827a33948d3523056bca/cf/ad27bd63f36d4f009b183fd3f8552abf] hfiles 2024-11-23T17:08:11,140 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/ca3ad0775b5f827a33948d3523056bca/cf/ad27bd63f36d4f009b183fd3f8552abf for snapshot=snaptb-testExportWithResetTtl 2024-11-23T17:08:11,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741993_1169 (size=100) 2024-11-23T17:08:11,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741993_1169 (size=100) 2024-11-23T17:08:11,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741993_1169 (size=100) 2024-11-23T17:08:11,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1732381689809.ca3ad0775b5f827a33948d3523056bca. 
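The records above show the FLUSH-type snapshot { ss=snaptb-testExportWithResetTtl ttl=100000 } being taken region by region: each SnapshotRegionCallable flushes its region, SnapshotManifest then stores the region info and a reference to the flushed hfile, and in the meantime the client keeps polling the master ("Checking to see if procedure is done pid=79"). Below is a minimal client-side sketch of requesting such a snapshot; the "TTL" property key, its unit, and the SnapshotDescription overload taking snapshot properties are assumptions that may vary by HBase version.

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeTtlSnapshot {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Snapshot properties; the "TTL" key mirrors the ttl=100000 seen in the log and is an assumption.
          Map<String, Object> props = new HashMap<>();
          props.put("TTL", 100000L);
          // FLUSH-type snapshot: regions are flushed before their hfiles are referenced,
          // which is the flush/manifest sequence logged above.
          SnapshotDescription desc = new SnapshotDescription(
              "snaptb-testExportWithResetTtl",
              TableName.valueOf("testExportWithResetTtl"),
              SnapshotType.FLUSH,
              props);
          // Blocks until the master-side SnapshotProcedure (pid=79 above) completes.
          admin.snapshot(desc);
        }
      }
    }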
2024-11-23T17:08:11,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=81 2024-11-23T17:08:11,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=81 2024-11-23T17:08:11,150 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region ca3ad0775b5f827a33948d3523056bca 2024-11-23T17:08:11,150 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ca3ad0775b5f827a33948d3523056bca 2024-11-23T17:08:11,153 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=81, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ca3ad0775b5f827a33948d3523056bca in 635 msec 2024-11-23T17:08:11,523 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=400, hasBloomFilter=true, into tmp file hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/28a8da401eb8b48ea494978295421db9/.tmp/cf/9416de3887e3465981a5b35a359cb61f 2024-11-23T17:08:11,530 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/28a8da401eb8b48ea494978295421db9/.tmp/cf/9416de3887e3465981a5b35a359cb61f as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/28a8da401eb8b48ea494978295421db9/cf/9416de3887e3465981a5b35a359cb61f 2024-11-23T17:08:11,542 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/28a8da401eb8b48ea494978295421db9/cf/9416de3887e3465981a5b35a359cb61f, entries=6, sequenceid=5, filesize=6.3 K 2024-11-23T17:08:11,543 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(3140): Finished flush of dataSize ~400 B/400, heapSize ~1.08 KB/1104, currentSize=0 B/0 for 28a8da401eb8b48ea494978295421db9 in 873ms, sequenceid=5, compaction requested=false 2024-11-23T17:08:11,544 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2603): Flush status journal for 28a8da401eb8b48ea494978295421db9: 2024-11-23T17:08:11,544 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1732381689809.28a8da401eb8b48ea494978295421db9. for snaptb-testExportWithResetTtl completed. 2024-11-23T17:08:11,544 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1732381689809.28a8da401eb8b48ea494978295421db9.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-11-23T17:08:11,544 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:08:11,544 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/28a8da401eb8b48ea494978295421db9/cf/9416de3887e3465981a5b35a359cb61f] hfiles 2024-11-23T17:08:11,544 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/28a8da401eb8b48ea494978295421db9/cf/9416de3887e3465981a5b35a359cb61f for snapshot=snaptb-testExportWithResetTtl 2024-11-23T17:08:11,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741994_1170 (size=100) 2024-11-23T17:08:11,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741994_1170 (size=100) 2024-11-23T17:08:11,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741994_1170 (size=100) 2024-11-23T17:08:11,565 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1732381689809.28a8da401eb8b48ea494978295421db9. 
2024-11-23T17:08:11,565 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-11-23T17:08:11,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=80 2024-11-23T17:08:11,566 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 28a8da401eb8b48ea494978295421db9 2024-11-23T17:08:11,567 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 28a8da401eb8b48ea494978295421db9 2024-11-23T17:08:11,570 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=80, resume processing ppid=79 2024-11-23T17:08:11,570 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:08:11,570 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=80, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 28a8da401eb8b48ea494978295421db9 in 1.0510 sec 2024-11-23T17:08:11,571 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:08:11,572 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
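While the SnapshotProcedure steps through its states here (SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, then SNAPSHOT_SNAPSHOT_MOB_REGION), the client's RPC handler keeps asking the master whether pid=79 is done. A non-blocking submit-and-poll variant is sketched below, reusing an open Admin as in the previous sketch and assuming Admin.snapshotAsync/isSnapshotFinished behave as their names suggest; the poll interval and the property-less SnapshotDescription are purely illustrative.

    // Submit without blocking, then poll; the repeated
    // "Checking to see if procedure is done pid=79" records are the server side of this loop.
    SnapshotDescription desc = new SnapshotDescription(
        "snaptb-testExportWithResetTtl",
        TableName.valueOf("testExportWithResetTtl"),
        SnapshotType.FLUSH);
    admin.snapshotAsync(desc);
    while (!admin.isSnapshotFinished(desc)) {
      Thread.sleep(500);  // arbitrary poll interval for the sketch
    }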
2024-11-23T17:08:11,572 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-23T17:08:11,572 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:08:11,574 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241123039a05a55b744cb7b8b9ccab95b067e2_ca3ad0775b5f827a33948d3523056bca, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241123dce40466f53e4fe580653bb3f226a4dd_28a8da401eb8b48ea494978295421db9] hfiles 2024-11-23T17:08:11,574 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241123039a05a55b744cb7b8b9ccab95b067e2_ca3ad0775b5f827a33948d3523056bca 2024-11-23T17:08:11,574 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241123dce40466f53e4fe580653bb3f226a4dd_28a8da401eb8b48ea494978295421db9 2024-11-23T17:08:11,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741995_1171 (size=284) 2024-11-23T17:08:11,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741995_1171 (size=284) 2024-11-23T17:08:11,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741995_1171 (size=284) 2024-11-23T17:08:11,594 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:08:11,594 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-11-23T17:08:11,595 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-23T17:08:11,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741996_1172 (size=923) 2024-11-23T17:08:11,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741996_1172 (size=923) 2024-11-23T17:08:11,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:44705 is added to blk_1073741996_1172 (size=923) 2024-11-23T17:08:11,622 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:08:11,630 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:08:11,630 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-23T17:08:11,632 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:08:11,632 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-11-23T17:08:11,634 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=79, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 1.1300 sec 2024-11-23T17:08:11,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-11-23T17:08:11,638 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportWithResetTtl completed 2024-11-23T17:08:11,648 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381691648 2024-11-23T17:08:11,648 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:35431, tgtDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381691648, rawTgtDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381691648, srcFsUri=hdfs://localhost:35431, srcDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:08:11,680 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:35431, inputRoot=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:08:11,680 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1293158403_22, ugi=jenkins (auth:SIMPLE)]], 
outputRoot=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381691648, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381691648/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-23T17:08:11,683 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-23T17:08:11,688 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381691648/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-23T17:08:11,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741997_1173 (size=143) 2024-11-23T17:08:11,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741997_1173 (size=143) 2024-11-23T17:08:11,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741997_1173 (size=143) 2024-11-23T17:08:11,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741998_1174 (size=923) 2024-11-23T17:08:11,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741998_1174 (size=923) 2024-11-23T17:08:11,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741998_1174 (size=923) 2024-11-23T17:08:11,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741999_1175 (size=141) 2024-11-23T17:08:11,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741999_1175 (size=141) 2024-11-23T17:08:11,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741999_1175 (size=141) 2024-11-23T17:08:11,737 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:08:11,737 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:08:11,738 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:08:12,465 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0002_000001 (auth:SIMPLE) from 127.0.0.1:45256 2024-11-23T17:08:12,477 WARN [ContainersLauncher #0 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_3/usercache/jenkins/appcache/application_1732381541272_0002/container_1732381541272_0002_01_000001/launch_container.sh] 2024-11-23T17:08:12,477 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_3/usercache/jenkins/appcache/application_1732381541272_0002/container_1732381541272_0002_01_000001/container_tokens] 2024-11-23T17:08:12,477 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_3/usercache/jenkins/appcache/application_1732381541272_0002/container_1732381541272_0002_01_000001/sysfs] 2024-11-23T17:08:12,914 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop-5906441473216960679.jar 2024-11-23T17:08:12,915 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:08:12,915 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:08:12,982 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop-5160531201062490281.jar 2024-11-23T17:08:12,983 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:08:12,983 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:08:12,984 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:08:12,984 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 
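The "For class ..., using jar ..." records here (and continuing below) are TableMapReduceUtil resolving, for each class the export job needs, the jar that provides it so the jar can be shipped with the MapReduce job; the addStoredBlock records that follow correspond to those jars being written into HDFS as job resources. ExportSnapshot does this as part of its job setup; a minimal stand-alone sketch of the same call follows, with an illustrative job name.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class AddExportJobDeps {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "export-snaptb-testExportWithResetTtl");  // illustrative name
        // Finds the containing jar for each required HBase/Hadoop class and adds it to the
        // job's tmpjars; these lookups are what produces the "For class ..., using jar ..." records.
        TableMapReduceUtil.addDependencyJars(job);
      }
    }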
2024-11-23T17:08:12,985 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:08:12,985 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:08:12,985 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-23T17:08:12,986 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-23T17:08:12,986 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-23T17:08:12,986 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-23T17:08:12,987 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-23T17:08:12,987 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-23T17:08:12,987 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-23T17:08:12,987 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-23T17:08:12,988 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-23T17:08:12,988 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-23T17:08:12,988 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-23T17:08:12,988 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:08:12,989 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:08:12,989 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:08:12,989 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:08:12,989 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:08:12,989 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:08:12,990 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:08:13,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742000_1176 (size=24020) 2024-11-23T17:08:13,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742000_1176 (size=24020) 2024-11-23T17:08:13,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742000_1176 (size=24020) 2024-11-23T17:08:13,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742001_1177 (size=77755) 2024-11-23T17:08:13,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742001_1177 (size=77755) 2024-11-23T17:08:13,062 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742001_1177 (size=77755) 2024-11-23T17:08:13,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742002_1178 (size=131360) 2024-11-23T17:08:13,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742002_1178 (size=131360) 2024-11-23T17:08:13,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742002_1178 (size=131360) 2024-11-23T17:08:13,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742003_1179 (size=111793) 2024-11-23T17:08:13,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742003_1179 (size=111793) 2024-11-23T17:08:13,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742003_1179 (size=111793) 2024-11-23T17:08:13,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742004_1180 (size=1832290) 2024-11-23T17:08:13,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742004_1180 (size=1832290) 2024-11-23T17:08:13,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742004_1180 (size=1832290) 2024-11-23T17:08:13,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742005_1181 (size=440956) 2024-11-23T17:08:13,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742005_1181 (size=440956) 2024-11-23T17:08:13,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742005_1181 (size=440956) 2024-11-23T17:08:13,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742006_1182 (size=8360005) 2024-11-23T17:08:13,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742006_1182 (size=8360005) 2024-11-23T17:08:13,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742006_1182 (size=8360005) 2024-11-23T17:08:13,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742007_1183 (size=503880) 2024-11-23T17:08:13,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742007_1183 (size=503880) 2024-11-23T17:08:13,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742007_1183 (size=503880) 2024-11-23T17:08:13,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742008_1184 (size=322274) 2024-11-23T17:08:13,159 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742008_1184 (size=322274) 2024-11-23T17:08:13,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742008_1184 (size=322274) 2024-11-23T17:08:13,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742009_1185 (size=20406) 2024-11-23T17:08:13,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742009_1185 (size=20406) 2024-11-23T17:08:13,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742009_1185 (size=20406) 2024-11-23T17:08:13,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742010_1186 (size=45609) 2024-11-23T17:08:13,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742010_1186 (size=45609) 2024-11-23T17:08:13,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742010_1186 (size=45609) 2024-11-23T17:08:13,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742011_1187 (size=136454) 2024-11-23T17:08:13,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742011_1187 (size=136454) 2024-11-23T17:08:13,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742011_1187 (size=136454) 2024-11-23T17:08:13,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742012_1188 (size=1597270) 2024-11-23T17:08:13,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742012_1188 (size=1597270) 2024-11-23T17:08:13,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742012_1188 (size=1597270) 2024-11-23T17:08:13,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742013_1189 (size=30873) 2024-11-23T17:08:13,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742013_1189 (size=30873) 2024-11-23T17:08:13,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742013_1189 (size=30873) 2024-11-23T17:08:13,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742014_1190 (size=29229) 2024-11-23T17:08:13,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742014_1190 (size=29229) 2024-11-23T17:08:13,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742014_1190 (size=29229) 2024-11-23T17:08:13,254 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742015_1191 (size=903661) 2024-11-23T17:08:13,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742015_1191 (size=903661) 2024-11-23T17:08:13,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742015_1191 (size=903661) 2024-11-23T17:08:13,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742016_1192 (size=5175431) 2024-11-23T17:08:13,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742016_1192 (size=5175431) 2024-11-23T17:08:13,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742016_1192 (size=5175431) 2024-11-23T17:08:13,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742017_1193 (size=232881) 2024-11-23T17:08:13,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742017_1193 (size=232881) 2024-11-23T17:08:13,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742017_1193 (size=232881) 2024-11-23T17:08:13,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742018_1194 (size=1323991) 2024-11-23T17:08:13,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742018_1194 (size=1323991) 2024-11-23T17:08:13,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742018_1194 (size=1323991) 2024-11-23T17:08:13,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742019_1195 (size=4695811) 2024-11-23T17:08:13,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742019_1195 (size=4695811) 2024-11-23T17:08:13,340 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:08:13,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742019_1195 (size=4695811) 2024-11-23T17:08:13,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742020_1196 (size=1877034) 2024-11-23T17:08:13,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742020_1196 (size=1877034) 2024-11-23T17:08:13,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742020_1196 (size=1877034) 2024-11-23T17:08:13,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742021_1197 (size=6424742) 
2024-11-23T17:08:13,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742021_1197 (size=6424742) 2024-11-23T17:08:13,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742021_1197 (size=6424742) 2024-11-23T17:08:13,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742022_1198 (size=217555) 2024-11-23T17:08:13,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742022_1198 (size=217555) 2024-11-23T17:08:13,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742022_1198 (size=217555) 2024-11-23T17:08:13,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742023_1199 (size=4188619) 2024-11-23T17:08:13,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742023_1199 (size=4188619) 2024-11-23T17:08:13,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742023_1199 (size=4188619) 2024-11-23T17:08:13,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742024_1200 (size=127628) 2024-11-23T17:08:13,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742024_1200 (size=127628) 2024-11-23T17:08:13,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742024_1200 (size=127628) 2024-11-23T17:08:13,509 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
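With the dependency jars staged, the "No job jar file set" warning appears benign in this run, since the needed classes ship via the tmpjars added above rather than a single job jar; ExportSnapshot then loads the snapshot's hfile list and groups it into balanced splits for the copy mappers, which is what the "export split=N size=..." records below show. A minimal sketch of driving the same export is below, assuming ExportSnapshot can be run as a Hadoop Tool and that the documented -snapshot/-copy-to/-mappers options apply to this version; the destination path and mapper count are illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class RunExport {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Roughly equivalent to the CLI form:
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //     -snapshot snaptb-testExportWithResetTtl -copy-to <target fs/path> -mappers 4
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb-testExportWithResetTtl",
            "-copy-to", "hdfs://localhost:35431/user/jenkins/export-dest",  // illustrative target path
            "-mappers", "4"
        });
        System.exit(rc);
      }
    }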
2024-11-23T17:08:13,512 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list 2024-11-23T17:08:13,514 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=13.6 K 2024-11-23T17:08:13,515 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.8 K 2024-11-23T17:08:13,515 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=6.3 K 2024-11-23T17:08:13,515 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.2 K 2024-11-23T17:08:13,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742025_1201 (size=995) 2024-11-23T17:08:13,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742025_1201 (size=995) 2024-11-23T17:08:13,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742025_1201 (size=995) 2024-11-23T17:08:13,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742026_1202 (size=35) 2024-11-23T17:08:13,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742026_1202 (size=35) 2024-11-23T17:08:13,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742026_1202 (size=35) 2024-11-23T17:08:13,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742027_1203 (size=304071) 2024-11-23T17:08:13,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742027_1203 (size=304071) 2024-11-23T17:08:13,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742027_1203 (size=304071) 2024-11-23T17:08:13,614 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T17:08:13,615 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-23T17:08:14,340 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-11-23T17:08:14,340 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-11-23T17:08:14,341 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0003_000001 (auth:SIMPLE) from 127.0.0.1:51114 2024-11-23T17:08:14,341 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-11-23T17:08:14,341 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-11-23T17:08:14,342 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-11-23T17:08:19,845 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:08:20,342 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0003_000001 (auth:SIMPLE) from 127.0.0.1:46556 2024-11-23T17:08:20,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742028_1204 (size=349769) 2024-11-23T17:08:20,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742028_1204 (size=349769) 2024-11-23T17:08:20,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742028_1204 (size=349769) 2024-11-23T17:08:22,593 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0003_000001 (auth:SIMPLE) from 127.0.0.1:48976 2024-11-23T17:08:22,593 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0003_000001 (auth:SIMPLE) from 127.0.0.1:40832 2024-11-23T17:08:23,451 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0003_000001 (auth:SIMPLE) from 127.0.0.1:40846 2024-11-23T17:08:23,464 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0003_000001 (auth:SIMPLE) from 127.0.0.1:48984 2024-11-23T17:08:25,466 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732381541272_0003_01_000006 while processing FINISH_CONTAINERS event 2024-11-23T17:08:27,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742029_1205 (size=7961) 2024-11-23T17:08:27,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742029_1205 
(size=7961) 2024-11-23T17:08:27,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742029_1205 (size=7961) 2024-11-23T17:08:27,866 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_2/usercache/jenkins/appcache/application_1732381541272_0003/container_1732381541272_0003_01_000003/launch_container.sh] 2024-11-23T17:08:27,866 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_2/usercache/jenkins/appcache/application_1732381541272_0003/container_1732381541272_0003_01_000003/container_tokens] 2024-11-23T17:08:27,866 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_2/usercache/jenkins/appcache/application_1732381541272_0003/container_1732381541272_0003_01_000003/sysfs] 2024-11-23T17:08:28,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742031_1207 (size=13932) 2024-11-23T17:08:28,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742031_1207 (size=13932) 2024-11-23T17:08:28,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742031_1207 (size=13932) 2024-11-23T17:08:29,084 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_3/usercache/jenkins/appcache/application_1732381541272_0003/container_1732381541272_0003_01_000002/launch_container.sh] 2024-11-23T17:08:29,084 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_3/usercache/jenkins/appcache/application_1732381541272_0003/container_1732381541272_0003_01_000002/container_tokens] 2024-11-23T17:08:29,084 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_3/usercache/jenkins/appcache/application_1732381541272_0003/container_1732381541272_0003_01_000002/sysfs] 2024-11-23T17:08:29,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742032_1208 (size=6463) 2024-11-23T17:08:29,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 
is added to blk_1073742032_1208 (size=6463) 2024-11-23T17:08:29,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742032_1208 (size=6463) 2024-11-23T17:08:29,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742033_1209 (size=5312) 2024-11-23T17:08:29,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742033_1209 (size=5312) 2024-11-23T17:08:29,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742033_1209 (size=5312) 2024-11-23T17:08:29,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742030_1206 (size=31704) 2024-11-23T17:08:29,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742030_1206 (size=31704) 2024-11-23T17:08:29,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742030_1206 (size=31704) 2024-11-23T17:08:29,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742034_1210 (size=462) 2024-11-23T17:08:29,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742034_1210 (size=462) 2024-11-23T17:08:29,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742034_1210 (size=462) 2024-11-23T17:08:29,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742035_1211 (size=31704) 2024-11-23T17:08:29,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742035_1211 (size=31704) 2024-11-23T17:08:29,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742035_1211 (size=31704) 2024-11-23T17:08:29,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742036_1212 (size=349769) 2024-11-23T17:08:29,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742036_1212 (size=349769) 2024-11-23T17:08:29,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742036_1212 (size=349769) 2024-11-23T17:08:29,942 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_1/usercache/jenkins/appcache/application_1732381541272_0003/container_1732381541272_0003_01_000005/launch_container.sh] 2024-11-23T17:08:29,942 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_1/usercache/jenkins/appcache/application_1732381541272_0003/container_1732381541272_0003_01_000005/container_tokens] 2024-11-23T17:08:29,942 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_1/usercache/jenkins/appcache/application_1732381541272_0003/container_1732381541272_0003_01_000005/sysfs] 2024-11-23T17:08:29,950 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0003_000001 (auth:SIMPLE) from 127.0.0.1:48998 2024-11-23T17:08:29,956 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0003_000001 (auth:SIMPLE) from 127.0.0.1:40850 2024-11-23T17:08:31,821 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-23T17:08:31,823 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-11-23T17:08:31,834 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb-testExportWithResetTtl 2024-11-23T17:08:31,834 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-23T17:08:31,835 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-23T17:08:31,835 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1293158403_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-23T17:08:31,836 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-11-23T17:08:31,836 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-11-23T17:08:31,836 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1293158403_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381691648/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381691648/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-23T17:08:31,836 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381691648/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-11-23T17:08:31,836 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381691648/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-11-23T17:08:31,843 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportWithResetTtl 2024-11-23T17:08:31,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=82, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportWithResetTtl 2024-11-23T17:08:31,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-11-23T17:08:31,846 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381711846"}]},"ts":"1732381711846"} 2024-11-23T17:08:31,848 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta 2024-11-23T17:08:31,848 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING 2024-11-23T17:08:31,849 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=83, ppid=82, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-11-23T17:08:31,851 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=28a8da401eb8b48ea494978295421db9, UNASSIGN}, {pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ca3ad0775b5f827a33948d3523056bca, UNASSIGN}] 2024-11-23T17:08:31,852 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=28a8da401eb8b48ea494978295421db9, UNASSIGN 2024-11-23T17:08:31,852 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ca3ad0775b5f827a33948d3523056bca, UNASSIGN 2024-11-23T17:08:31,853 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=ca3ad0775b5f827a33948d3523056bca, regionState=CLOSING, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:08:31,853 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=28a8da401eb8b48ea494978295421db9, regionState=CLOSING, regionLocation=1a7c5470b472,43453,1732381534825 2024-11-23T17:08:31,855 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=28a8da401eb8b48ea494978295421db9, UNASSIGN because future has completed 2024-11-23T17:08:31,855 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 
2024-11-23T17:08:31,855 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=86, ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure 28a8da401eb8b48ea494978295421db9, server=1a7c5470b472,43453,1732381534825}] 2024-11-23T17:08:31,855 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=ca3ad0775b5f827a33948d3523056bca, UNASSIGN because future has completed 2024-11-23T17:08:31,856 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:08:31,856 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=87, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure ca3ad0775b5f827a33948d3523056bca, server=1a7c5470b472,37875,1732381534675}] 2024-11-23T17:08:31,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-11-23T17:08:32,008 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(122): Close 28a8da401eb8b48ea494978295421db9 2024-11-23T17:08:32,008 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:08:32,008 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1722): Closing 28a8da401eb8b48ea494978295421db9, disabling compactions & flushes 2024-11-23T17:08:32,009 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1732381689809.28a8da401eb8b48ea494978295421db9. 2024-11-23T17:08:32,009 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1732381689809.28a8da401eb8b48ea494978295421db9. 2024-11-23T17:08:32,009 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1732381689809.28a8da401eb8b48ea494978295421db9. after waiting 0 ms 2024-11-23T17:08:32,009 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1732381689809.28a8da401eb8b48ea494978295421db9. 
2024-11-23T17:08:32,010 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(122): Close ca3ad0775b5f827a33948d3523056bca 2024-11-23T17:08:32,010 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:08:32,010 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1722): Closing ca3ad0775b5f827a33948d3523056bca, disabling compactions & flushes 2024-11-23T17:08:32,010 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1732381689809.ca3ad0775b5f827a33948d3523056bca. 2024-11-23T17:08:32,010 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1732381689809.ca3ad0775b5f827a33948d3523056bca. 2024-11-23T17:08:32,010 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1732381689809.ca3ad0775b5f827a33948d3523056bca. after waiting 0 ms 2024-11-23T17:08:32,010 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1732381689809.ca3ad0775b5f827a33948d3523056bca. 2024-11-23T17:08:32,016 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/28a8da401eb8b48ea494978295421db9/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-23T17:08:32,017 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:08:32,017 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1732381689809.28a8da401eb8b48ea494978295421db9. 
2024-11-23T17:08:32,017 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1676): Region close journal for 28a8da401eb8b48ea494978295421db9: Waiting for close lock at 1732381712008Running coprocessor pre-close hooks at 1732381712008Disabling compacts and flushes for region at 1732381712008Disabling writes for close at 1732381712009 (+1 ms)Writing region close event to WAL at 1732381712010 (+1 ms)Running coprocessor post-close hooks at 1732381712017 (+7 ms)Closed at 1732381712017 2024-11-23T17:08:32,019 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/ca3ad0775b5f827a33948d3523056bca/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-23T17:08:32,020 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:08:32,020 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1732381689809.ca3ad0775b5f827a33948d3523056bca. 2024-11-23T17:08:32,020 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1676): Region close journal for ca3ad0775b5f827a33948d3523056bca: Waiting for close lock at 1732381712010Running coprocessor pre-close hooks at 1732381712010Disabling compacts and flushes for region at 1732381712010Disabling writes for close at 1732381712010Writing region close event to WAL at 1732381712011 (+1 ms)Running coprocessor post-close hooks at 1732381712019 (+8 ms)Closed at 1732381712020 (+1 ms) 2024-11-23T17:08:32,020 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(157): Closed 28a8da401eb8b48ea494978295421db9 2024-11-23T17:08:32,020 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=28a8da401eb8b48ea494978295421db9, regionState=CLOSED 2024-11-23T17:08:32,024 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=86, ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure 28a8da401eb8b48ea494978295421db9, server=1a7c5470b472,43453,1732381534825 because future has completed 2024-11-23T17:08:32,025 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(157): Closed ca3ad0775b5f827a33948d3523056bca 2024-11-23T17:08:32,026 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=ca3ad0775b5f827a33948d3523056bca, regionState=CLOSED 2024-11-23T17:08:32,029 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=87, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure ca3ad0775b5f827a33948d3523056bca, server=1a7c5470b472,37875,1732381534675 because future has completed 2024-11-23T17:08:32,030 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=86, resume processing ppid=84 2024-11-23T17:08:32,032 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=86, ppid=84, state=SUCCESS, hasLock=false; CloseRegionProcedure 
28a8da401eb8b48ea494978295421db9, server=1a7c5470b472,43453,1732381534825 in 170 msec 2024-11-23T17:08:32,033 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=87, resume processing ppid=85 2024-11-23T17:08:32,033 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=84, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=28a8da401eb8b48ea494978295421db9, UNASSIGN in 180 msec 2024-11-23T17:08:32,033 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=87, ppid=85, state=SUCCESS, hasLock=false; CloseRegionProcedure ca3ad0775b5f827a33948d3523056bca, server=1a7c5470b472,37875,1732381534675 in 174 msec 2024-11-23T17:08:32,041 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=85, resume processing ppid=83 2024-11-23T17:08:32,041 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=85, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ca3ad0775b5f827a33948d3523056bca, UNASSIGN in 182 msec 2024-11-23T17:08:32,044 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=83, resume processing ppid=82 2024-11-23T17:08:32,044 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=83, ppid=82, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl in 193 msec 2024-11-23T17:08:32,046 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381712046"}]},"ts":"1732381712046"} 2024-11-23T17:08:32,050 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-11-23T17:08:32,050 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-11-23T17:08:32,053 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=82, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportWithResetTtl in 208 msec 2024-11-23T17:08:32,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-11-23T17:08:32,158 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportWithResetTtl completed 2024-11-23T17:08:32,159 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testExportWithResetTtl 2024-11-23T17:08:32,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl 2024-11-23T17:08:32,161 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-23T17:08:32,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] access.PermissionStorage(261): Removing permissions of removed table testExportWithResetTtl 2024-11-23T17:08:32,162 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=88, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; 
DeleteTableProcedure table=testExportWithResetTtl 2024-11-23T17:08:32,167 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37875 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-11-23T17:08:32,167 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/28a8da401eb8b48ea494978295421db9 2024-11-23T17:08:32,167 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/ca3ad0775b5f827a33948d3523056bca 2024-11-23T17:08:32,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-23T17:08:32,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-23T17:08:32,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-23T17:08:32,170 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/28a8da401eb8b48ea494978295421db9/cf, FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/28a8da401eb8b48ea494978295421db9/recovered.edits] 2024-11-23T17:08:32,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-23T17:08:32,171 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-23T17:08:32,171 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-23T17:08:32,171 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-23T17:08:32,173 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-23T17:08:32,173 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-23T17:08:32,173 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/acl 2024-11-23T17:08:32,173 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:08:32,173 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:08:32,173 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/ca3ad0775b5f827a33948d3523056bca/cf, FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/ca3ad0775b5f827a33948d3523056bca/recovered.edits] 2024-11-23T17:08:32,173 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data null 2024-11-23T17:08:32,173 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-23T17:08:32,173 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-23T17:08:32,173 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:08:32,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=88 2024-11-23T17:08:32,174 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T17:08:32,174 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T17:08:32,175 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T17:08:32,175 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T17:08:32,178 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/28a8da401eb8b48ea494978295421db9/cf/9416de3887e3465981a5b35a359cb61f to 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testExportWithResetTtl/28a8da401eb8b48ea494978295421db9/cf/9416de3887e3465981a5b35a359cb61f 2024-11-23T17:08:32,181 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/28a8da401eb8b48ea494978295421db9/recovered.edits/8.seqid to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testExportWithResetTtl/28a8da401eb8b48ea494978295421db9/recovered.edits/8.seqid 2024-11-23T17:08:32,181 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/28a8da401eb8b48ea494978295421db9 2024-11-23T17:08:32,186 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/ca3ad0775b5f827a33948d3523056bca/cf/ad27bd63f36d4f009b183fd3f8552abf to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testExportWithResetTtl/ca3ad0775b5f827a33948d3523056bca/cf/ad27bd63f36d4f009b183fd3f8552abf 2024-11-23T17:08:32,190 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/ca3ad0775b5f827a33948d3523056bca/recovered.edits/8.seqid to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testExportWithResetTtl/ca3ad0775b5f827a33948d3523056bca/recovered.edits/8.seqid 2024-11-23T17:08:32,190 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportWithResetTtl/ca3ad0775b5f827a33948d3523056bca 2024-11-23T17:08:32,191 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl regions 2024-11-23T17:08:32,191 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d 2024-11-23T17:08:32,192 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf] 2024-11-23T17:08:32,196 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241123039a05a55b744cb7b8b9ccab95b067e2_ca3ad0775b5f827a33948d3523056bca to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241123039a05a55b744cb7b8b9ccab95b067e2_ca3ad0775b5f827a33948d3523056bca 2024-11-23T17:08:32,197 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241123dce40466f53e4fe580653bb3f226a4dd_28a8da401eb8b48ea494978295421db9 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241123dce40466f53e4fe580653bb3f226a4dd_28a8da401eb8b48ea494978295421db9 2024-11-23T17:08:32,198 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d 2024-11-23T17:08:32,201 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=88, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-23T17:08:32,204 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-11-23T17:08:32,207 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testExportWithResetTtl' descriptor. 2024-11-23T17:08:32,209 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=88, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-23T17:08:32,209 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testExportWithResetTtl' from region states. 2024-11-23T17:08:32,209 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1732381689809.28a8da401eb8b48ea494978295421db9.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732381712209"}]},"ts":"9223372036854775807"} 2024-11-23T17:08:32,209 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1732381689809.ca3ad0775b5f827a33948d3523056bca.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732381712209"}]},"ts":"9223372036854775807"} 2024-11-23T17:08:32,212 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-23T17:08:32,212 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 28a8da401eb8b48ea494978295421db9, NAME => 'testExportWithResetTtl,,1732381689809.28a8da401eb8b48ea494978295421db9.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => ca3ad0775b5f827a33948d3523056bca, NAME => 'testExportWithResetTtl,1,1732381689809.ca3ad0775b5f827a33948d3523056bca.', STARTKEY => '1', ENDKEY => ''}] 2024-11-23T17:08:32,212 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testExportWithResetTtl' as deleted. 
2024-11-23T17:08:32,212 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732381712212"}]},"ts":"9223372036854775807"} 2024-11-23T17:08:32,214 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testExportWithResetTtl state from META 2024-11-23T17:08:32,215 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=88, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-23T17:08:32,216 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=88, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl in 56 msec 2024-11-23T17:08:32,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=88 2024-11-23T17:08:32,278 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportWithResetTtl 2024-11-23T17:08:32,279 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportWithResetTtl completed 2024-11-23T17:08:32,279 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithResetTtl 2024-11-23T17:08:32,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=89, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-11-23T17:08:32,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-11-23T17:08:32,296 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381712296"}]},"ts":"1732381712296"} 2024-11-23T17:08:32,299 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 2024-11-23T17:08:32,299 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithResetTtl to state=DISABLING 2024-11-23T17:08:32,300 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-11-23T17:08:32,304 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=0547da56e62a809fa0c7179ccfb1e23a, UNASSIGN}, {pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=fccbf8c10e348af89c4f1d21eb43cdb2, UNASSIGN}] 2024-11-23T17:08:32,305 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=fccbf8c10e348af89c4f1d21eb43cdb2, UNASSIGN 2024-11-23T17:08:32,305 INFO [PEWorker-5 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=0547da56e62a809fa0c7179ccfb1e23a, UNASSIGN 2024-11-23T17:08:32,307 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=fccbf8c10e348af89c4f1d21eb43cdb2, regionState=CLOSING, regionLocation=1a7c5470b472,44711,1732381534872 2024-11-23T17:08:32,307 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=0547da56e62a809fa0c7179ccfb1e23a, regionState=CLOSING, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:08:32,311 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=fccbf8c10e348af89c4f1d21eb43cdb2, UNASSIGN because future has completed 2024-11-23T17:08:32,311 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:08:32,311 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure fccbf8c10e348af89c4f1d21eb43cdb2, server=1a7c5470b472,44711,1732381534872}] 2024-11-23T17:08:32,312 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=0547da56e62a809fa0c7179ccfb1e23a, UNASSIGN because future has completed 2024-11-23T17:08:32,313 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:08:32,313 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=94, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0547da56e62a809fa0c7179ccfb1e23a, server=1a7c5470b472,37875,1732381534675}] 2024-11-23T17:08:32,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-11-23T17:08:32,466 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(122): Close fccbf8c10e348af89c4f1d21eb43cdb2 2024-11-23T17:08:32,466 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:08:32,466 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1722): Closing fccbf8c10e348af89c4f1d21eb43cdb2, disabling compactions & flushes 2024-11-23T17:08:32,466 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(122): Close 0547da56e62a809fa0c7179ccfb1e23a 2024-11-23T17:08:32,466 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1732381688125.fccbf8c10e348af89c4f1d21eb43cdb2. 
2024-11-23T17:08:32,466 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:08:32,467 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1732381688125.fccbf8c10e348af89c4f1d21eb43cdb2. 2024-11-23T17:08:32,467 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1732381688125.fccbf8c10e348af89c4f1d21eb43cdb2. after waiting 0 ms 2024-11-23T17:08:32,467 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1722): Closing 0547da56e62a809fa0c7179ccfb1e23a, disabling compactions & flushes 2024-11-23T17:08:32,467 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1732381688125.fccbf8c10e348af89c4f1d21eb43cdb2. 2024-11-23T17:08:32,467 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1732381688125.0547da56e62a809fa0c7179ccfb1e23a. 2024-11-23T17:08:32,467 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1732381688125.0547da56e62a809fa0c7179ccfb1e23a. 2024-11-23T17:08:32,467 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1732381688125.0547da56e62a809fa0c7179ccfb1e23a. after waiting 0 ms 2024-11-23T17:08:32,467 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1732381688125.0547da56e62a809fa0c7179ccfb1e23a. 2024-11-23T17:08:32,481 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/0547da56e62a809fa0c7179ccfb1e23a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:08:32,482 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:08:32,482 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1732381688125.0547da56e62a809fa0c7179ccfb1e23a. 
2024-11-23T17:08:32,482 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1676): Region close journal for 0547da56e62a809fa0c7179ccfb1e23a: Waiting for close lock at 1732381712467Running coprocessor pre-close hooks at 1732381712467Disabling compacts and flushes for region at 1732381712467Disabling writes for close at 1732381712467Writing region close event to WAL at 1732381712468 (+1 ms)Running coprocessor post-close hooks at 1732381712482 (+14 ms)Closed at 1732381712482 2024-11-23T17:08:32,485 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/fccbf8c10e348af89c4f1d21eb43cdb2/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:08:32,485 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(157): Closed 0547da56e62a809fa0c7179ccfb1e23a 2024-11-23T17:08:32,486 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:08:32,486 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1732381688125.fccbf8c10e348af89c4f1d21eb43cdb2. 2024-11-23T17:08:32,486 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1676): Region close journal for fccbf8c10e348af89c4f1d21eb43cdb2: Waiting for close lock at 1732381712466Running coprocessor pre-close hooks at 1732381712466Disabling compacts and flushes for region at 1732381712466Disabling writes for close at 1732381712467 (+1 ms)Writing region close event to WAL at 1732381712470 (+3 ms)Running coprocessor post-close hooks at 1732381712486 (+16 ms)Closed at 1732381712486 2024-11-23T17:08:32,486 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=0547da56e62a809fa0c7179ccfb1e23a, regionState=CLOSED 2024-11-23T17:08:32,488 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=94, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0547da56e62a809fa0c7179ccfb1e23a, server=1a7c5470b472,37875,1732381534675 because future has completed 2024-11-23T17:08:32,489 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(157): Closed fccbf8c10e348af89c4f1d21eb43cdb2 2024-11-23T17:08:32,489 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=fccbf8c10e348af89c4f1d21eb43cdb2, regionState=CLOSED 2024-11-23T17:08:32,491 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=93, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure fccbf8c10e348af89c4f1d21eb43cdb2, server=1a7c5470b472,44711,1732381534872 because future has completed 2024-11-23T17:08:32,492 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=94, resume processing ppid=91 2024-11-23T17:08:32,493 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=94, ppid=91, state=SUCCESS, hasLock=false; CloseRegionProcedure 
0547da56e62a809fa0c7179ccfb1e23a, server=1a7c5470b472,37875,1732381534675 in 177 msec 2024-11-23T17:08:32,495 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=93, resume processing ppid=92 2024-11-23T17:08:32,495 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=93, ppid=92, state=SUCCESS, hasLock=false; CloseRegionProcedure fccbf8c10e348af89c4f1d21eb43cdb2, server=1a7c5470b472,44711,1732381534872 in 181 msec 2024-11-23T17:08:32,496 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=91, ppid=90, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=0547da56e62a809fa0c7179ccfb1e23a, UNASSIGN in 189 msec 2024-11-23T17:08:32,498 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=92, resume processing ppid=90 2024-11-23T17:08:32,498 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=92, ppid=90, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=fccbf8c10e348af89c4f1d21eb43cdb2, UNASSIGN in 191 msec 2024-11-23T17:08:32,502 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=90, resume processing ppid=89 2024-11-23T17:08:32,502 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=90, ppid=89, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 199 msec 2024-11-23T17:08:32,504 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381712504"}]},"ts":"1732381712504"} 2024-11-23T17:08:32,507 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-11-23T17:08:32,507 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 2024-11-23T17:08:32,509 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=89, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl in 229 msec 2024-11-23T17:08:32,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-11-23T17:08:32,608 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-23T17:08:32,609 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithResetTtl 2024-11-23T17:08:32,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-23T17:08:32,612 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-23T17:08:32,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithResetTtl 2024-11-23T17:08:32,613 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=95, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-23T17:08:32,617 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37875 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-11-23T17:08:32,619 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/0547da56e62a809fa0c7179ccfb1e23a 2024-11-23T17:08:32,619 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/fccbf8c10e348af89c4f1d21eb43cdb2 2024-11-23T17:08:32,622 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/0547da56e62a809fa0c7179ccfb1e23a/cf, FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/0547da56e62a809fa0c7179ccfb1e23a/recovered.edits] 2024-11-23T17:08:32,622 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/fccbf8c10e348af89c4f1d21eb43cdb2/cf, FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/fccbf8c10e348af89c4f1d21eb43cdb2/recovered.edits] 2024-11-23T17:08:32,627 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/0547da56e62a809fa0c7179ccfb1e23a/cf/196576713e0f4138b45c0677aae92757 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportWithResetTtl/0547da56e62a809fa0c7179ccfb1e23a/cf/196576713e0f4138b45c0677aae92757 2024-11-23T17:08:32,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-23T17:08:32,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-23T17:08:32,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-23T17:08:32,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-23T17:08:32,629 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportWithResetTtl with data PBUF 2024-11-23T17:08:32,629 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-23T17:08:32,629 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-23T17:08:32,629 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/fccbf8c10e348af89c4f1d21eb43cdb2/cf/a56f83debde94846a8352e1fe27f0d33 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportWithResetTtl/fccbf8c10e348af89c4f1d21eb43cdb2/cf/a56f83debde94846a8352e1fe27f0d33 2024-11-23T17:08:32,629 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-23T17:08:32,631 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-23T17:08:32,631 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:08:32,631 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-23T17:08:32,631 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:08:32,631 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-23T17:08:32,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:08:32,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-23T17:08:32,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:08:32,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=95 2024-11-23T17:08:32,634 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/0547da56e62a809fa0c7179ccfb1e23a/recovered.edits/9.seqid to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportWithResetTtl/0547da56e62a809fa0c7179ccfb1e23a/recovered.edits/9.seqid 2024-11-23T17:08:32,635 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/0547da56e62a809fa0c7179ccfb1e23a 2024-11-23T17:08:32,639 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/fccbf8c10e348af89c4f1d21eb43cdb2/recovered.edits/9.seqid to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportWithResetTtl/fccbf8c10e348af89c4f1d21eb43cdb2/recovered.edits/9.seqid 2024-11-23T17:08:32,640 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithResetTtl/fccbf8c10e348af89c4f1d21eb43cdb2 2024-11-23T17:08:32,640 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-11-23T17:08:32,641 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e 2024-11-23T17:08:32,641 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf] 2024-11-23T17:08:32,646 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b202411232d3d14e62bf74b29b57e38ed1694ab97_fccbf8c10e348af89c4f1d21eb43cdb2 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b202411232d3d14e62bf74b29b57e38ed1694ab97_fccbf8c10e348af89c4f1d21eb43cdb2 2024-11-23T17:08:32,649 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e202411235c4bf73c4b8a4edc8b4d12d8ffe6aea4_0547da56e62a809fa0c7179ccfb1e23a to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e202411235c4bf73c4b8a4edc8b4d12d8ffe6aea4_0547da56e62a809fa0c7179ccfb1e23a 2024-11-23T17:08:32,649 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e 2024-11-23T17:08:32,652 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=95, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-23T17:08:32,656 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-11-23T17:08:32,658 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithResetTtl' descriptor. 2024-11-23T17:08:32,659 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=95, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-23T17:08:32,659 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithResetTtl' from region states. 2024-11-23T17:08:32,660 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1732381688125.0547da56e62a809fa0c7179ccfb1e23a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732381712660"}]},"ts":"9223372036854775807"} 2024-11-23T17:08:32,660 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1732381688125.fccbf8c10e348af89c4f1d21eb43cdb2.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732381712660"}]},"ts":"9223372036854775807"} 2024-11-23T17:08:32,662 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-23T17:08:32,662 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 0547da56e62a809fa0c7179ccfb1e23a, NAME => 'testtb-testExportWithResetTtl,,1732381688125.0547da56e62a809fa0c7179ccfb1e23a.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => fccbf8c10e348af89c4f1d21eb43cdb2, NAME => 'testtb-testExportWithResetTtl,1,1732381688125.fccbf8c10e348af89c4f1d21eb43cdb2.', STARTKEY => '1', ENDKEY => ''}] 2024-11-23T17:08:32,662 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithResetTtl' as deleted. 
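The entries above record DeleteTableProcedure pid=95 archiving both regions of testtb-testExportWithResetTtl and clearing its rows from hbase:meta. As a point of reference only, a minimal client-side sketch (assuming a reachable cluster; class and variable names are illustrative and not taken from the test source) of the Admin calls that drive this same procedure:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testExportWithResetTtl");
          if (admin.tableExists(tn)) {
            if (admin.isTableEnabled(tn)) {
              admin.disableTable(tn); // regions must be unassigned before deletion
            }
            // The master then runs DeleteTableProcedure: archive HFiles,
            // delete the META rows, and drop the table descriptor,
            // as logged above for pid=95.
            admin.deleteTable(tn);
          }
        }
      }
    }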
2024-11-23T17:08:32,662 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732381712662"}]},"ts":"9223372036854775807"} 2024-11-23T17:08:32,664 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithResetTtl state from META 2024-11-23T17:08:32,665 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=95, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-23T17:08:32,666 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=95, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl in 56 msec 2024-11-23T17:08:32,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=95 2024-11-23T17:08:32,739 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithResetTtl 2024-11-23T17:08:32,739 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-23T17:08:32,750 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithResetTtl" type: DISABLED 2024-11-23T17:08:32,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-11-23T17:08:32,753 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb-testExportWithResetTtl" type: DISABLED 2024-11-23T17:08:32,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb-testExportWithResetTtl 2024-11-23T17:08:32,756 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithResetTtl" type: DISABLED 2024-11-23T17:08:32,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-11-23T17:08:32,782 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithResetTtl Thread=798 (was 791) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42647 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-3164 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1293158403_22 at /127.0.0.1:35940 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:44303 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: process reaper (pid 3714) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-711007656_1 at /127.0.0.1:57830 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: ForkJoinPool.commonPool-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-711007656_1 at /127.0.0.1:42418 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:42647 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1293158403_22 at /127.0.0.1:40254 [Waiting for operation #8] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1293158403_22 at /127.0.0.1:55950 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43583 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=813 (was 817), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=597 (was 501) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 14) - ProcessCount LEAK? -, AvailableMemoryMB=3378 (was 3849) 2024-11-23T17:08:32,783 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=798 is superior to 500 2024-11-23T17:08:32,800 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemState Thread=798, OpenFileDescriptor=813, MaxFileDescriptor=1048576, SystemLoadAverage=597, ProcessCount=17, AvailableMemoryMB=3377 2024-11-23T17:08:32,800 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=798 is superior to 500 2024-11-23T17:08:32,802 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T17:08:32,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState 2024-11-23T17:08:32,804 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T17:08:32,805 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 96 2024-11-23T17:08:32,806 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T17:08:32,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-11-23T17:08:32,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742037_1213 (size=443) 2024-11-23T17:08:32,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742037_1213 (size=443) 2024-11-23T17:08:32,821 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742037_1213 (size=443) 2024-11-23T17:08:32,825 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => c9fe899e89cf835ee0cde66ecc1c55d2, NAME => 'testtb-testExportFileSystemState,1,1732381712801.c9fe899e89cf835ee0cde66ecc1c55d2.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:08:32,825 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d3add4368eba27120bde04a6e101d1e6, NAME => 'testtb-testExportFileSystemState,,1732381712801.d3add4368eba27120bde04a6e101d1e6.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:08:32,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742038_1214 (size=68) 2024-11-23T17:08:32,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742038_1214 (size=68) 2024-11-23T17:08:32,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742038_1214 (size=68) 2024-11-23T17:08:32,846 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1732381712801.d3add4368eba27120bde04a6e101d1e6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:08:32,846 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing d3add4368eba27120bde04a6e101d1e6, disabling compactions & flushes 2024-11-23T17:08:32,846 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1732381712801.d3add4368eba27120bde04a6e101d1e6. 2024-11-23T17:08:32,846 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1732381712801.d3add4368eba27120bde04a6e101d1e6. 
2024-11-23T17:08:32,846 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1732381712801.d3add4368eba27120bde04a6e101d1e6. after waiting 0 ms 2024-11-23T17:08:32,846 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1732381712801.d3add4368eba27120bde04a6e101d1e6. 2024-11-23T17:08:32,846 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1732381712801.d3add4368eba27120bde04a6e101d1e6. 2024-11-23T17:08:32,846 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for d3add4368eba27120bde04a6e101d1e6: Waiting for close lock at 1732381712846Disabling compacts and flushes for region at 1732381712846Disabling writes for close at 1732381712846Writing region close event to WAL at 1732381712846Closed at 1732381712846 2024-11-23T17:08:32,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742039_1215 (size=68) 2024-11-23T17:08:32,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742039_1215 (size=68) 2024-11-23T17:08:32,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742039_1215 (size=68) 2024-11-23T17:08:32,865 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1732381712801.c9fe899e89cf835ee0cde66ecc1c55d2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:08:32,865 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing c9fe899e89cf835ee0cde66ecc1c55d2, disabling compactions & flushes 2024-11-23T17:08:32,865 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1732381712801.c9fe899e89cf835ee0cde66ecc1c55d2. 2024-11-23T17:08:32,866 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1732381712801.c9fe899e89cf835ee0cde66ecc1c55d2. 2024-11-23T17:08:32,866 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1732381712801.c9fe899e89cf835ee0cde66ecc1c55d2. after waiting 0 ms 2024-11-23T17:08:32,866 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1732381712801.c9fe899e89cf835ee0cde66ecc1c55d2. 2024-11-23T17:08:32,866 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1732381712801.c9fe899e89cf835ee0cde66ecc1c55d2. 
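The create request for pid=96 above spells out the table layout: one MOB-enabled family 'cf' with MOB_THRESHOLD => '0', VERSIONS => '1', and a single split point at '1' giving two regions. A hedged sketch of the equivalent Admin call built from those logged attributes (illustrative names, not the test's own code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testExportFileSystemState");
          ColumnFamilyDescriptorBuilder cf =
              ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMobEnabled(true)   // IS_MOB => 'true'
                  .setMobThreshold(0L)   // MOB_THRESHOLD => '0': every value stored as MOB
                  .setMaxVersions(1);    // VERSIONS => '1'
          TableDescriptorBuilder td =
              TableDescriptorBuilder.newBuilder(tn).setColumnFamily(cf.build());
          byte[][] splitKeys = { Bytes.toBytes("1") }; // regions ['', '1') and ['1', '')
          admin.createTable(td.build(), splitKeys);    // submits CreateTableProcedure (pid=96 in this log)
        }
      }
    }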
2024-11-23T17:08:32,866 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for c9fe899e89cf835ee0cde66ecc1c55d2: Waiting for close lock at 1732381712865Disabling compacts and flushes for region at 1732381712865Disabling writes for close at 1732381712866 (+1 ms)Writing region close event to WAL at 1732381712866Closed at 1732381712866 2024-11-23T17:08:32,867 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T17:08:32,868 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1732381712801.d3add4368eba27120bde04a6e101d1e6.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1732381712867"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732381712867"}]},"ts":"1732381712867"} 2024-11-23T17:08:32,868 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1732381712801.c9fe899e89cf835ee0cde66ecc1c55d2.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1732381712867"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732381712867"}]},"ts":"1732381712867"} 2024-11-23T17:08:32,871 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-23T17:08:32,872 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T17:08:32,873 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381712873"}]},"ts":"1732381712873"} 2024-11-23T17:08:32,875 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-11-23T17:08:32,875 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {1a7c5470b472=0} racks are {/default-rack=0} 2024-11-23T17:08:32,877 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T17:08:32,877 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T17:08:32,877 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T17:08:32,877 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T17:08:32,877 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T17:08:32,877 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T17:08:32,877 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T17:08:32,877 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T17:08:32,877 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T17:08:32,877 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T17:08:32,877 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d3add4368eba27120bde04a6e101d1e6, ASSIGN}, {pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c9fe899e89cf835ee0cde66ecc1c55d2, ASSIGN}] 2024-11-23T17:08:32,879 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c9fe899e89cf835ee0cde66ecc1c55d2, ASSIGN 2024-11-23T17:08:32,879 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d3add4368eba27120bde04a6e101d1e6, ASSIGN 2024-11-23T17:08:32,880 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d3add4368eba27120bde04a6e101d1e6, ASSIGN; state=OFFLINE, location=1a7c5470b472,43453,1732381534825; forceNewPlan=false, retain=false 2024-11-23T17:08:32,880 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c9fe899e89cf835ee0cde66ecc1c55d2, ASSIGN; state=OFFLINE, location=1a7c5470b472,44711,1732381534872; forceNewPlan=false, retain=false 2024-11-23T17:08:32,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-11-23T17:08:33,031 INFO [1a7c5470b472:46159 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-23T17:08:33,031 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=c9fe899e89cf835ee0cde66ecc1c55d2, regionState=OPENING, regionLocation=1a7c5470b472,44711,1732381534872 2024-11-23T17:08:33,031 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=d3add4368eba27120bde04a6e101d1e6, regionState=OPENING, regionLocation=1a7c5470b472,43453,1732381534825 2024-11-23T17:08:33,034 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c9fe899e89cf835ee0cde66ecc1c55d2, ASSIGN because future has completed 2024-11-23T17:08:33,034 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure c9fe899e89cf835ee0cde66ecc1c55d2, server=1a7c5470b472,44711,1732381534872}] 2024-11-23T17:08:33,035 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d3add4368eba27120bde04a6e101d1e6, ASSIGN because future has completed 2024-11-23T17:08:33,035 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=100, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure d3add4368eba27120bde04a6e101d1e6, server=1a7c5470b472,43453,1732381534825}] 2024-11-23T17:08:33,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-11-23T17:08:33,178 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T17:08:33,190 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,1,1732381712801.c9fe899e89cf835ee0cde66ecc1c55d2. 2024-11-23T17:08:33,190 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,,1732381712801.d3add4368eba27120bde04a6e101d1e6. 2024-11-23T17:08:33,190 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7752): Opening region: {ENCODED => c9fe899e89cf835ee0cde66ecc1c55d2, NAME => 'testtb-testExportFileSystemState,1,1732381712801.c9fe899e89cf835ee0cde66ecc1c55d2.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T17:08:33,190 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7752): Opening region: {ENCODED => d3add4368eba27120bde04a6e101d1e6, NAME => 'testtb-testExportFileSystemState,,1732381712801.d3add4368eba27120bde04a6e101d1e6.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T17:08:33,191 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,,1732381712801.d3add4368eba27120bde04a6e101d1e6. 
service=AccessControlService 2024-11-23T17:08:33,191 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1732381712801.c9fe899e89cf835ee0cde66ecc1c55d2. service=AccessControlService 2024-11-23T17:08:33,191 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:08:33,191 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:08:33,191 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState c9fe899e89cf835ee0cde66ecc1c55d2 2024-11-23T17:08:33,191 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState d3add4368eba27120bde04a6e101d1e6 2024-11-23T17:08:33,191 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1732381712801.c9fe899e89cf835ee0cde66ecc1c55d2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:08:33,191 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1732381712801.d3add4368eba27120bde04a6e101d1e6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:08:33,191 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7794): checking encryption for c9fe899e89cf835ee0cde66ecc1c55d2 2024-11-23T17:08:33,191 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7794): checking encryption for d3add4368eba27120bde04a6e101d1e6 2024-11-23T17:08:33,191 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7797): checking classloading for c9fe899e89cf835ee0cde66ecc1c55d2 2024-11-23T17:08:33,191 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7797): checking classloading for d3add4368eba27120bde04a6e101d1e6 2024-11-23T17:08:33,193 INFO [StoreOpener-c9fe899e89cf835ee0cde66ecc1c55d2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c9fe899e89cf835ee0cde66ecc1c55d2 2024-11-23T17:08:33,193 INFO [StoreOpener-d3add4368eba27120bde04a6e101d1e6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family cf of region d3add4368eba27120bde04a6e101d1e6 2024-11-23T17:08:33,195 INFO [StoreOpener-c9fe899e89cf835ee0cde66ecc1c55d2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c9fe899e89cf835ee0cde66ecc1c55d2 columnFamilyName cf 2024-11-23T17:08:33,195 INFO [StoreOpener-d3add4368eba27120bde04a6e101d1e6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d3add4368eba27120bde04a6e101d1e6 columnFamilyName cf 2024-11-23T17:08:33,196 DEBUG [StoreOpener-c9fe899e89cf835ee0cde66ecc1c55d2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:08:33,196 DEBUG [StoreOpener-d3add4368eba27120bde04a6e101d1e6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:08:33,197 INFO [StoreOpener-c9fe899e89cf835ee0cde66ecc1c55d2-1 {}] regionserver.HStore(327): Store=c9fe899e89cf835ee0cde66ecc1c55d2/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:08:33,197 INFO [StoreOpener-d3add4368eba27120bde04a6e101d1e6-1 {}] regionserver.HStore(327): Store=d3add4368eba27120bde04a6e101d1e6/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:08:33,197 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1038): replaying wal for c9fe899e89cf835ee0cde66ecc1c55d2 2024-11-23T17:08:33,197 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1038): replaying wal for d3add4368eba27120bde04a6e101d1e6 2024-11-23T17:08:33,198 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/c9fe899e89cf835ee0cde66ecc1c55d2 2024-11-23T17:08:33,199 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 
{event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/d3add4368eba27120bde04a6e101d1e6 2024-11-23T17:08:33,199 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/c9fe899e89cf835ee0cde66ecc1c55d2 2024-11-23T17:08:33,199 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/d3add4368eba27120bde04a6e101d1e6 2024-11-23T17:08:33,199 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1048): stopping wal replay for c9fe899e89cf835ee0cde66ecc1c55d2 2024-11-23T17:08:33,199 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1060): Cleaning up temporary data for c9fe899e89cf835ee0cde66ecc1c55d2 2024-11-23T17:08:33,200 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1048): stopping wal replay for d3add4368eba27120bde04a6e101d1e6 2024-11-23T17:08:33,200 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1060): Cleaning up temporary data for d3add4368eba27120bde04a6e101d1e6 2024-11-23T17:08:33,202 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1093): writing seq id for c9fe899e89cf835ee0cde66ecc1c55d2 2024-11-23T17:08:33,202 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1093): writing seq id for d3add4368eba27120bde04a6e101d1e6 2024-11-23T17:08:33,204 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/d3add4368eba27120bde04a6e101d1e6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:08:33,205 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/c9fe899e89cf835ee0cde66ecc1c55d2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:08:33,205 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1114): Opened d3add4368eba27120bde04a6e101d1e6; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64046853, jitterRate=-0.045627519488334656}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:08:33,205 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d3add4368eba27120bde04a6e101d1e6 
2024-11-23T17:08:33,205 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1114): Opened c9fe899e89cf835ee0cde66ecc1c55d2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70564893, jitterRate=0.05149884521961212}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:08:33,205 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c9fe899e89cf835ee0cde66ecc1c55d2 2024-11-23T17:08:33,206 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1006): Region open journal for c9fe899e89cf835ee0cde66ecc1c55d2: Running coprocessor pre-open hook at 1732381713191Writing region info on filesystem at 1732381713192 (+1 ms)Initializing all the Stores at 1732381713192Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381713192Cleaning up temporary data from old regions at 1732381713199 (+7 ms)Running coprocessor post-open hooks at 1732381713205 (+6 ms)Region opened successfully at 1732381713206 (+1 ms) 2024-11-23T17:08:33,206 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1006): Region open journal for d3add4368eba27120bde04a6e101d1e6: Running coprocessor pre-open hook at 1732381713191Writing region info on filesystem at 1732381713192 (+1 ms)Initializing all the Stores at 1732381713192Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381713192Cleaning up temporary data from old regions at 1732381713200 (+8 ms)Running coprocessor post-open hooks at 1732381713205 (+5 ms)Region opened successfully at 1732381713206 (+1 ms) 2024-11-23T17:08:33,207 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,,1732381712801.d3add4368eba27120bde04a6e101d1e6., pid=100, masterSystemTime=1732381713187 2024-11-23T17:08:33,207 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,1,1732381712801.c9fe899e89cf835ee0cde66ecc1c55d2., pid=99, masterSystemTime=1732381713186 2024-11-23T17:08:33,209 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,,1732381712801.d3add4368eba27120bde04a6e101d1e6. 
2024-11-23T17:08:33,209 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,,1732381712801.d3add4368eba27120bde04a6e101d1e6. 2024-11-23T17:08:33,209 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=d3add4368eba27120bde04a6e101d1e6, regionState=OPEN, openSeqNum=2, regionLocation=1a7c5470b472,43453,1732381534825 2024-11-23T17:08:33,210 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,1,1732381712801.c9fe899e89cf835ee0cde66ecc1c55d2. 2024-11-23T17:08:33,210 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,1,1732381712801.c9fe899e89cf835ee0cde66ecc1c55d2. 2024-11-23T17:08:33,212 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=100, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure d3add4368eba27120bde04a6e101d1e6, server=1a7c5470b472,43453,1732381534825 because future has completed 2024-11-23T17:08:33,213 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=c9fe899e89cf835ee0cde66ecc1c55d2, regionState=OPEN, openSeqNum=2, regionLocation=1a7c5470b472,44711,1732381534872 2024-11-23T17:08:33,216 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=99, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure c9fe899e89cf835ee0cde66ecc1c55d2, server=1a7c5470b472,44711,1732381534872 because future has completed 2024-11-23T17:08:33,219 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=100, resume processing ppid=97 2024-11-23T17:08:33,219 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=100, ppid=97, state=SUCCESS, hasLock=false; OpenRegionProcedure d3add4368eba27120bde04a6e101d1e6, server=1a7c5470b472,43453,1732381534825 in 181 msec 2024-11-23T17:08:33,222 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=99, resume processing ppid=98 2024-11-23T17:08:33,222 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=97, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d3add4368eba27120bde04a6e101d1e6, ASSIGN in 342 msec 2024-11-23T17:08:33,222 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=99, ppid=98, state=SUCCESS, hasLock=false; OpenRegionProcedure c9fe899e89cf835ee0cde66ecc1c55d2, server=1a7c5470b472,44711,1732381534872 in 184 msec 2024-11-23T17:08:33,224 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=98, resume processing ppid=96 2024-11-23T17:08:33,224 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=98, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c9fe899e89cf835ee0cde66ecc1c55d2, ASSIGN in 345 msec 2024-11-23T17:08:33,225 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T17:08:33,225 DEBUG [PEWorker-2 
{}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381713225"}]},"ts":"1732381713225"} 2024-11-23T17:08:33,227 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-11-23T17:08:33,228 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T17:08:33,228 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-11-23T17:08:33,232 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37875 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-11-23T17:08:33,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:08:33,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:08:33,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:08:33,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:08:33,238 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-23T17:08:33,239 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-23T17:08:33,239 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-23T17:08:33,239 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-23T17:08:33,240 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=96, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState in 435 msec 2024-11-23T17:08:33,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 
2024-11-23T17:08:33,438 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-23T17:08:33,439 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T17:08:33,442 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-11-23T17:08:33,442 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1732381712801.d3add4368eba27120bde04a6e101d1e6. 2024-11-23T17:08:33,443 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:08:33,444 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T17:08:33,451 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T17:08:33,457 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T17:08:33,460 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-23T17:08:33,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732381713460 (current time:1732381713460). 
2024-11-23T17:08:33,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:08:33,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-11-23T17:08:33,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:08:33,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2907c93c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:33,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:08:33,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:08:33,463 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:08:33,463 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:08:33,463 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:08:33,463 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9a7e8c1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:33,463 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:08:33,464 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:08:33,464 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:33,465 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53480, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:08:33,466 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2bf6366e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:33,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:08:33,467 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:08:33,467 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:08:33,468 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36846, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:08:33,469 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159. 2024-11-23T17:08:33,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:08:33,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:33,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:33,469 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T17:08:33,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10a6d522, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:33,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:08:33,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:08:33,471 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:08:33,471 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:08:33,471 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:08:33,471 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13bf2496, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:33,471 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:08:33,472 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:08:33,472 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:33,472 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53498, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:08:33,473 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cc847eb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:33,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:08:33,474 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:08:33,474 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:08:33,475 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36860, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-23T17:08:33,477 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce., hostname=1a7c5470b472,37875,1732381534675, seqNum=2] 2024-11-23T17:08:33,478 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159. 2024-11-23T17:08:33,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor224.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:08:33,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:33,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:33,478 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T17:08:33,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-11-23T17:08:33,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T17:08:33,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-23T17:08:33,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-11-23T17:08:33,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-11-23T17:08:33,482 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:08:33,483 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:08:33,485 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:08:33,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742040_1216 (size=170) 2024-11-23T17:08:33,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742040_1216 (size=170) 2024-11-23T17:08:33,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742040_1216 (size=170) 2024-11-23T17:08:33,501 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:08:33,501 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d3add4368eba27120bde04a6e101d1e6}, {pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c9fe899e89cf835ee0cde66ecc1c55d2}] 2024-11-23T17:08:33,502 INFO [PEWorker-5 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d3add4368eba27120bde04a6e101d1e6 2024-11-23T17:08:33,503 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c9fe899e89cf835ee0cde66ecc1c55d2 2024-11-23T17:08:33,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-11-23T17:08:33,655 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44711 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=103 2024-11-23T17:08:33,655 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43453 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=102 2024-11-23T17:08:33,655 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1732381712801.d3add4368eba27120bde04a6e101d1e6. 2024-11-23T17:08:33,655 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1732381712801.c9fe899e89cf835ee0cde66ecc1c55d2. 2024-11-23T17:08:33,656 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.HRegion(2603): Flush status journal for d3add4368eba27120bde04a6e101d1e6: 2024-11-23T17:08:33,656 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(2603): Flush status journal for c9fe899e89cf835ee0cde66ecc1c55d2: 2024-11-23T17:08:33,656 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1732381712801.d3add4368eba27120bde04a6e101d1e6. for emptySnaptb0-testExportFileSystemState completed. 2024-11-23T17:08:33,656 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1732381712801.c9fe899e89cf835ee0cde66ecc1c55d2. for emptySnaptb0-testExportFileSystemState completed. 2024-11-23T17:08:33,656 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1732381712801.d3add4368eba27120bde04a6e101d1e6.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-11-23T17:08:33,656 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1732381712801.c9fe899e89cf835ee0cde66ecc1c55d2.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-11-23T17:08:33,656 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:08:33,656 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:08:33,656 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:08:33,656 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:08:33,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742041_1217 (size=71) 2024-11-23T17:08:33,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742041_1217 (size=71) 2024-11-23T17:08:33,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742041_1217 (size=71) 2024-11-23T17:08:33,667 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1732381712801.d3add4368eba27120bde04a6e101d1e6. 2024-11-23T17:08:33,667 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=102 2024-11-23T17:08:33,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=102 2024-11-23T17:08:33,668 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region d3add4368eba27120bde04a6e101d1e6 2024-11-23T17:08:33,668 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d3add4368eba27120bde04a6e101d1e6 2024-11-23T17:08:33,671 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=102, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d3add4368eba27120bde04a6e101d1e6 in 168 msec 2024-11-23T17:08:33,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742042_1218 (size=71) 2024-11-23T17:08:33,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742042_1218 (size=71) 2024-11-23T17:08:33,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742042_1218 (size=71) 2024-11-23T17:08:33,691 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1732381712801.c9fe899e89cf835ee0cde66ecc1c55d2. 
2024-11-23T17:08:33,691 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-11-23T17:08:33,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=103 2024-11-23T17:08:33,692 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region c9fe899e89cf835ee0cde66ecc1c55d2 2024-11-23T17:08:33,692 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c9fe899e89cf835ee0cde66ecc1c55d2 2024-11-23T17:08:33,696 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=103, resume processing ppid=101 2024-11-23T17:08:33,696 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=103, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c9fe899e89cf835ee0cde66ecc1c55d2 in 192 msec 2024-11-23T17:08:33,696 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:08:33,698 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:08:33,699 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-23T17:08:33,699 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-23T17:08:33,699 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:08:33,700 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-23T17:08:33,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742043_1219 (size=63) 2024-11-23T17:08:33,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742043_1219 (size=63) 2024-11-23T17:08:33,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742043_1219 (size=63) 2024-11-23T17:08:33,718 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:08:33,718 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-11-23T17:08:33,719 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-11-23T17:08:33,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742044_1220 (size=653) 2024-11-23T17:08:33,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742044_1220 (size=653) 2024-11-23T17:08:33,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742044_1220 (size=653) 2024-11-23T17:08:33,742 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:08:33,750 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:08:33,751 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-11-23T17:08:33,752 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, 
state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:08:33,752 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-11-23T17:08:33,754 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=101, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 273 msec 2024-11-23T17:08:33,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-11-23T17:08:33,798 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-11-23T17:08:33,807 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43453 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,,1732381712801.d3add4368eba27120bde04a6e101d1e6. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:08:33,809 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44711 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,1,1732381712801.c9fe899e89cf835ee0cde66ecc1c55d2. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:08:33,811 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T17:08:33,814 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-11-23T17:08:33,815 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1732381712801.d3add4368eba27120bde04a6e101d1e6. 
2024-11-23T17:08:33,815 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:08:33,817 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T17:08:33,823 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T17:08:33,830 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T17:08:33,834 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-23T17:08:33,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732381713834 (current time:1732381713834). 2024-11-23T17:08:33,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:08:33,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-11-23T17:08:33,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:08:33,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b752839, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:33,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:08:33,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:08:33,836 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:08:33,836 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:08:33,836 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:08:33,836 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d52f9a2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-11-23T17:08:33,836 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:08:33,837 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:08:33,837 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:33,838 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53520, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:08:33,838 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71fcff5f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:33,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:08:33,839 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:08:33,839 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:08:33,840 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36872, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:08:33,841 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159. 
2024-11-23T17:08:33,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:08:33,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:33,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:33,842 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:08:33,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bc44d93, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:33,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:08:33,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:08:33,844 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:08:33,844 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:08:33,844 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:08:33,844 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67799221, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:33,844 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:08:33,844 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:08:33,844 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:33,845 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53532, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:08:33,845 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@344fb52a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:33,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:08:33,847 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:08:33,847 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:08:33,848 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36884, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:08:33,850 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce., hostname=1a7c5470b472,37875,1732381534675, seqNum=2] 2024-11-23T17:08:33,852 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159. 
2024-11-23T17:08:33,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor224.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:08:33,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:33,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:33,852 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:08:33,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-11-23T17:08:33,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-23T17:08:33,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-23T17:08:33,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-11-23T17:08:33,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-11-23T17:08:33,855 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:08:33,856 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:08:33,859 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:08:33,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742045_1221 (size=165) 2024-11-23T17:08:33,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742045_1221 (size=165) 2024-11-23T17:08:33,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742045_1221 (size=165) 2024-11-23T17:08:33,877 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:08:33,877 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d3add4368eba27120bde04a6e101d1e6}, {pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c9fe899e89cf835ee0cde66ecc1c55d2}] 2024-11-23T17:08:33,878 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d3add4368eba27120bde04a6e101d1e6 2024-11-23T17:08:33,878 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c9fe899e89cf835ee0cde66ecc1c55d2 2024-11-23T17:08:33,957 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-11-23T17:08:34,031 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43453 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=105 2024-11-23T17:08:34,031 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44711 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=106 2024-11-23T17:08:34,031 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1732381712801.d3add4368eba27120bde04a6e101d1e6. 2024-11-23T17:08:34,031 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1732381712801.c9fe899e89cf835ee0cde66ecc1c55d2. 2024-11-23T17:08:34,031 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2902): Flushing d3add4368eba27120bde04a6e101d1e6 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-23T17:08:34,031 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2902): Flushing c9fe899e89cf835ee0cde66ecc1c55d2 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-23T17:08:34,057 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123cb49aebc1da94ba3b514088aa9ddb935_d3add4368eba27120bde04a6e101d1e6 is 71, key is 0c2664984baeb192018fe2ccb9ff407c/cf:q/1732381713807/Put/seqid=0 2024-11-23T17:08:34,063 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024112374ddf72dfaa6484ca33c72ace08fcfa2_c9fe899e89cf835ee0cde66ecc1c55d2 is 71, key is 13bcbc8f964fab78379c0d0c65c8a3b9/cf:q/1732381713809/Put/seqid=0 2024-11-23T17:08:34,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742046_1222 (size=5102) 2024-11-23T17:08:34,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742046_1222 (size=5102) 2024-11-23T17:08:34,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742046_1222 (size=5102) 2024-11-23T17:08:34,077 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:08:34,083 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] 
regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123cb49aebc1da94ba3b514088aa9ddb935_d3add4368eba27120bde04a6e101d1e6 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e20241123cb49aebc1da94ba3b514088aa9ddb935_d3add4368eba27120bde04a6e101d1e6 2024-11-23T17:08:34,084 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/d3add4368eba27120bde04a6e101d1e6/.tmp/cf/eded1c0a93214c92992e2c6842e51f82, store: [table=testtb-testExportFileSystemState family=cf region=d3add4368eba27120bde04a6e101d1e6] 2024-11-23T17:08:34,084 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/d3add4368eba27120bde04a6e101d1e6/.tmp/cf/eded1c0a93214c92992e2c6842e51f82 is 209, key is 03549e658aba29e84614460e972c8dc9c/cf:q/1732381713807/Put/seqid=0 2024-11-23T17:08:34,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742047_1223 (size=8171) 2024-11-23T17:08:34,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742047_1223 (size=8171) 2024-11-23T17:08:34,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742047_1223 (size=8171) 2024-11-23T17:08:34,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742048_1224 (size=5918) 2024-11-23T17:08:34,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742048_1224 (size=5918) 2024-11-23T17:08:34,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742048_1224 (size=5918) 2024-11-23T17:08:34,095 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/d3add4368eba27120bde04a6e101d1e6/.tmp/cf/eded1c0a93214c92992e2c6842e51f82 2024-11-23T17:08:34,100 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:08:34,110 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024112374ddf72dfaa6484ca33c72ace08fcfa2_c9fe899e89cf835ee0cde66ecc1c55d2 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b2024112374ddf72dfaa6484ca33c72ace08fcfa2_c9fe899e89cf835ee0cde66ecc1c55d2 2024-11-23T17:08:34,113 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/d3add4368eba27120bde04a6e101d1e6/.tmp/cf/eded1c0a93214c92992e2c6842e51f82 as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/d3add4368eba27120bde04a6e101d1e6/cf/eded1c0a93214c92992e2c6842e51f82 2024-11-23T17:08:34,118 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/d3add4368eba27120bde04a6e101d1e6/cf/eded1c0a93214c92992e2c6842e51f82, entries=3, sequenceid=6, filesize=5.8 K 2024-11-23T17:08:34,119 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for d3add4368eba27120bde04a6e101d1e6 in 88ms, sequenceid=6, compaction requested=false 2024-11-23T17:08:34,119 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-11-23T17:08:34,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2603): Flush status journal for d3add4368eba27120bde04a6e101d1e6: 2024-11-23T17:08:34,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1732381712801.d3add4368eba27120bde04a6e101d1e6. for snaptb0-testExportFileSystemState completed. 2024-11-23T17:08:34,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1732381712801.d3add4368eba27120bde04a6e101d1e6.' 
region-info for snapshot=snaptb0-testExportFileSystemState 2024-11-23T17:08:34,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:08:34,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/d3add4368eba27120bde04a6e101d1e6/cf/eded1c0a93214c92992e2c6842e51f82] hfiles 2024-11-23T17:08:34,121 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/d3add4368eba27120bde04a6e101d1e6/cf/eded1c0a93214c92992e2c6842e51f82 for snapshot=snaptb0-testExportFileSystemState 2024-11-23T17:08:34,128 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/c9fe899e89cf835ee0cde66ecc1c55d2/.tmp/cf/c57bc021fc0b46fe8cc7c627776f795a, store: [table=testtb-testExportFileSystemState family=cf region=c9fe899e89cf835ee0cde66ecc1c55d2] 2024-11-23T17:08:34,129 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/c9fe899e89cf835ee0cde66ecc1c55d2/.tmp/cf/c57bc021fc0b46fe8cc7c627776f795a is 209, key is 19d3463c3792921168fd37bb6e7b2c2e5/cf:q/1732381713809/Put/seqid=0 2024-11-23T17:08:34,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742049_1225 (size=110) 2024-11-23T17:08:34,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742049_1225 (size=110) 2024-11-23T17:08:34,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742049_1225 (size=110) 2024-11-23T17:08:34,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742050_1226 (size=14997) 2024-11-23T17:08:34,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742050_1226 (size=14997) 2024-11-23T17:08:34,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742050_1226 (size=14997) 2024-11-23T17:08:34,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1732381712801.d3add4368eba27120bde04a6e101d1e6. 
2024-11-23T17:08:34,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-11-23T17:08:34,144 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/c9fe899e89cf835ee0cde66ecc1c55d2/.tmp/cf/c57bc021fc0b46fe8cc7c627776f795a 2024-11-23T17:08:34,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=105 2024-11-23T17:08:34,145 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region d3add4368eba27120bde04a6e101d1e6 2024-11-23T17:08:34,145 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d3add4368eba27120bde04a6e101d1e6 2024-11-23T17:08:34,150 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=105, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d3add4368eba27120bde04a6e101d1e6 in 269 msec 2024-11-23T17:08:34,152 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/c9fe899e89cf835ee0cde66ecc1c55d2/.tmp/cf/c57bc021fc0b46fe8cc7c627776f795a as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/c9fe899e89cf835ee0cde66ecc1c55d2/cf/c57bc021fc0b46fe8cc7c627776f795a 2024-11-23T17:08:34,161 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/c9fe899e89cf835ee0cde66ecc1c55d2/cf/c57bc021fc0b46fe8cc7c627776f795a, entries=47, sequenceid=6, filesize=14.6 K 2024-11-23T17:08:34,162 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for c9fe899e89cf835ee0cde66ecc1c55d2 in 131ms, sequenceid=6, compaction requested=false 2024-11-23T17:08:34,162 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2603): Flush status journal for c9fe899e89cf835ee0cde66ecc1c55d2: 2024-11-23T17:08:34,162 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1732381712801.c9fe899e89cf835ee0cde66ecc1c55d2. for snaptb0-testExportFileSystemState completed. 
2024-11-23T17:08:34,162 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1732381712801.c9fe899e89cf835ee0cde66ecc1c55d2.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-11-23T17:08:34,162 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:08:34,162 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/c9fe899e89cf835ee0cde66ecc1c55d2/cf/c57bc021fc0b46fe8cc7c627776f795a] hfiles 2024-11-23T17:08:34,162 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/c9fe899e89cf835ee0cde66ecc1c55d2/cf/c57bc021fc0b46fe8cc7c627776f795a for snapshot=snaptb0-testExportFileSystemState 2024-11-23T17:08:34,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742051_1227 (size=110) 2024-11-23T17:08:34,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-11-23T17:08:34,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742051_1227 (size=110) 2024-11-23T17:08:34,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742051_1227 (size=110) 2024-11-23T17:08:34,170 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1732381712801.c9fe899e89cf835ee0cde66ecc1c55d2. 
2024-11-23T17:08:34,170 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=106 2024-11-23T17:08:34,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=106 2024-11-23T17:08:34,170 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region c9fe899e89cf835ee0cde66ecc1c55d2 2024-11-23T17:08:34,170 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c9fe899e89cf835ee0cde66ecc1c55d2 2024-11-23T17:08:34,173 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=106, resume processing ppid=104 2024-11-23T17:08:34,173 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=106, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c9fe899e89cf835ee0cde66ecc1c55d2 in 294 msec 2024-11-23T17:08:34,173 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:08:34,174 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:08:34,175 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
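Editor's note: the flushes above run through DefaultMobStoreFlusher, the flushed files land under /mobdir, and the procedure has a dedicated SNAPSHOT_SNAPSHOT_MOB_REGION step, so the table under test evidently has MOB enabled on its cf family. The following is a hedged sketch of how a MOB-enabled family is typically declared; the table name and threshold are illustrative assumptions, not taken from this test.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobTableSketch {
  // Creates a table whose 'cf' family stores oversized cells as MOB files.
  static void createMobTable(Admin admin) throws IOException {
    admin.createTable(TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testtb-example"))   // hypothetical table name
        .setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)     // cells above the threshold are written under /mobdir, as in this log
            .setMobThreshold(100L)   // illustrative threshold in bytes
            .build())
        .build());
  }
}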
2024-11-23T17:08:34,175 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-23T17:08:34,175 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:08:34,176 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b2024112374ddf72dfaa6484ca33c72ace08fcfa2_c9fe899e89cf835ee0cde66ecc1c55d2, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e20241123cb49aebc1da94ba3b514088aa9ddb935_d3add4368eba27120bde04a6e101d1e6] hfiles 2024-11-23T17:08:34,176 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b2024112374ddf72dfaa6484ca33c72ace08fcfa2_c9fe899e89cf835ee0cde66ecc1c55d2 2024-11-23T17:08:34,176 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e20241123cb49aebc1da94ba3b514088aa9ddb935_d3add4368eba27120bde04a6e101d1e6 2024-11-23T17:08:34,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742052_1228 (size=294) 2024-11-23T17:08:34,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742052_1228 (size=294) 2024-11-23T17:08:34,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742052_1228 (size=294) 2024-11-23T17:08:34,190 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:08:34,190 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-11-23T17:08:34,190 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-23T17:08:34,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742053_1229 (size=963) 2024-11-23T17:08:34,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742053_1229 (size=963) 2024-11-23T17:08:34,202 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742053_1229 (size=963) 2024-11-23T17:08:34,206 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:08:34,211 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:08:34,212 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-23T17:08:34,213 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:08:34,213 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-11-23T17:08:34,214 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=104, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 360 msec 2024-11-23T17:08:34,340 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-11-23T17:08:34,340 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-11-23T17:08:34,341 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-11-23T17:08:34,341 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-11-23T17:08:34,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-11-23T17:08:34,479 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-11-23T17:08:34,479 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381714479 2024-11-23T17:08:34,479 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:35431, tgtDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381714479, rawTgtDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381714479, srcFsUri=hdfs://localhost:35431, srcDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:08:34,508 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:35431, inputRoot=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:08:34,508 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1293158403_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381714479, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381714479/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-23T17:08:34,510 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-23T17:08:34,516 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381714479/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-23T17:08:34,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742054_1230 (size=165) 2024-11-23T17:08:34,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742054_1230 (size=165) 2024-11-23T17:08:34,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742054_1230 (size=165) 2024-11-23T17:08:34,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742055_1231 (size=963) 2024-11-23T17:08:34,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742055_1231 (size=963) 2024-11-23T17:08:34,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742055_1231 (size=963) 2024-11-23T17:08:34,561 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:08:34,561 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 
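Editor's note: the ExportSnapshot lines above resolve the source and destination filesystems, verify the snapshot, copy its manifest into the destination's .hbase-snapshot/.tmp area, and then TableMapReduceUtil resolves the dependency jars for the copy job. A minimal sketch of driving the same export, assuming a hypothetical destination cluster and snapshot name; the single-dash flags match the tool's documented CLI usage.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Equivalent CLI form:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //     -snapshot snaptb0-example -copy-to hdfs://target:8020/hbase -mappers 4
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-example",             // hypothetical snapshot name
        "-copy-to", "hdfs://target:8020/hbase",     // hypothetical destination root
        "-mappers", "4"
    });
    System.exit(rc);
  }
}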
2024-11-23T17:08:34,561 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:08:34,769 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_2/usercache/jenkins/appcache/application_1732381541272_0003/container_1732381541272_0003_01_000004/launch_container.sh] 2024-11-23T17:08:34,770 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_2/usercache/jenkins/appcache/application_1732381541272_0003/container_1732381541272_0003_01_000004/container_tokens] 2024-11-23T17:08:34,770 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_2/usercache/jenkins/appcache/application_1732381541272_0003/container_1732381541272_0003_01_000004/sysfs] 2024-11-23T17:08:35,768 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop-8539591663184798492.jar 2024-11-23T17:08:35,769 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:08:35,769 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:08:35,835 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop-16960510947438174177.jar 2024-11-23T17:08:35,835 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:08:35,835 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:08:35,836 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:08:35,836 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:08:35,836 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:08:35,836 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:08:35,836 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-23T17:08:35,837 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-23T17:08:35,837 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-23T17:08:35,837 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-23T17:08:35,837 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-23T17:08:35,837 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-23T17:08:35,838 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-23T17:08:35,838 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-23T17:08:35,838 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-23T17:08:35,838 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-23T17:08:35,838 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-23T17:08:35,839 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:08:35,839 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:08:35,839 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:08:35,839 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:08:35,840 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:08:35,840 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:08:35,840 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:08:35,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742056_1232 (size=24020) 2024-11-23T17:08:35,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742056_1232 (size=24020) 2024-11-23T17:08:35,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742056_1232 
(size=24020) 2024-11-23T17:08:35,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742057_1233 (size=77755) 2024-11-23T17:08:35,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742057_1233 (size=77755) 2024-11-23T17:08:35,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742057_1233 (size=77755) 2024-11-23T17:08:35,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742058_1234 (size=131360) 2024-11-23T17:08:35,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742058_1234 (size=131360) 2024-11-23T17:08:35,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742058_1234 (size=131360) 2024-11-23T17:08:35,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742059_1235 (size=111793) 2024-11-23T17:08:35,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742059_1235 (size=111793) 2024-11-23T17:08:35,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742059_1235 (size=111793) 2024-11-23T17:08:35,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742060_1236 (size=1832290) 2024-11-23T17:08:35,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742060_1236 (size=1832290) 2024-11-23T17:08:35,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742060_1236 (size=1832290) 2024-11-23T17:08:35,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742061_1237 (size=8360005) 2024-11-23T17:08:35,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742061_1237 (size=8360005) 2024-11-23T17:08:35,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742061_1237 (size=8360005) 2024-11-23T17:08:35,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742062_1238 (size=503880) 2024-11-23T17:08:35,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742062_1238 (size=503880) 2024-11-23T17:08:35,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742062_1238 (size=503880) 2024-11-23T17:08:35,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742063_1239 (size=322274) 2024-11-23T17:08:35,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to 
blk_1073742063_1239 (size=322274) 2024-11-23T17:08:35,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742063_1239 (size=322274) 2024-11-23T17:08:35,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742064_1240 (size=20406) 2024-11-23T17:08:35,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742064_1240 (size=20406) 2024-11-23T17:08:35,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742064_1240 (size=20406) 2024-11-23T17:08:35,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742065_1241 (size=45609) 2024-11-23T17:08:35,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742065_1241 (size=45609) 2024-11-23T17:08:35,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742065_1241 (size=45609) 2024-11-23T17:08:36,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742066_1242 (size=136454) 2024-11-23T17:08:36,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742066_1242 (size=136454) 2024-11-23T17:08:36,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742066_1242 (size=136454) 2024-11-23T17:08:36,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742067_1243 (size=1597270) 2024-11-23T17:08:36,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742067_1243 (size=1597270) 2024-11-23T17:08:36,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742067_1243 (size=1597270) 2024-11-23T17:08:36,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742068_1244 (size=30873) 2024-11-23T17:08:36,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742068_1244 (size=30873) 2024-11-23T17:08:36,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742068_1244 (size=30873) 2024-11-23T17:08:36,026 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0003_000001 (auth:SIMPLE) from 127.0.0.1:34326 2024-11-23T17:08:36,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742069_1245 (size=29229) 2024-11-23T17:08:36,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742069_1245 (size=29229) 2024-11-23T17:08:36,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added 
to blk_1073742069_1245 (size=29229) 2024-11-23T17:08:36,039 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_3/usercache/jenkins/appcache/application_1732381541272_0003/container_1732381541272_0003_01_000001/launch_container.sh] 2024-11-23T17:08:36,039 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_3/usercache/jenkins/appcache/application_1732381541272_0003/container_1732381541272_0003_01_000001/container_tokens] 2024-11-23T17:08:36,039 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_3/usercache/jenkins/appcache/application_1732381541272_0003/container_1732381541272_0003_01_000001/sysfs] 2024-11-23T17:08:36,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742070_1246 (size=903661) 2024-11-23T17:08:36,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742070_1246 (size=903661) 2024-11-23T17:08:36,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742070_1246 (size=903661) 2024-11-23T17:08:36,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742071_1247 (size=440956) 2024-11-23T17:08:36,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742071_1247 (size=440956) 2024-11-23T17:08:36,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742071_1247 (size=440956) 2024-11-23T17:08:36,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742072_1248 (size=5175431) 2024-11-23T17:08:36,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742072_1248 (size=5175431) 2024-11-23T17:08:36,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742072_1248 (size=5175431) 2024-11-23T17:08:36,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742073_1249 (size=232881) 2024-11-23T17:08:36,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742073_1249 (size=232881) 2024-11-23T17:08:36,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742073_1249 (size=232881) 2024-11-23T17:08:36,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:32895 is added to blk_1073742074_1250 (size=1323991) 2024-11-23T17:08:36,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742074_1250 (size=1323991) 2024-11-23T17:08:36,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742074_1250 (size=1323991) 2024-11-23T17:08:36,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742075_1251 (size=4695811) 2024-11-23T17:08:36,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742075_1251 (size=4695811) 2024-11-23T17:08:36,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742075_1251 (size=4695811) 2024-11-23T17:08:36,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742076_1252 (size=1877034) 2024-11-23T17:08:36,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742076_1252 (size=1877034) 2024-11-23T17:08:36,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742076_1252 (size=1877034) 2024-11-23T17:08:36,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742077_1253 (size=6424742) 2024-11-23T17:08:36,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742077_1253 (size=6424742) 2024-11-23T17:08:36,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742077_1253 (size=6424742) 2024-11-23T17:08:36,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742078_1254 (size=217555) 2024-11-23T17:08:36,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742078_1254 (size=217555) 2024-11-23T17:08:36,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742078_1254 (size=217555) 2024-11-23T17:08:36,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742079_1255 (size=4188619) 2024-11-23T17:08:36,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742079_1255 (size=4188619) 2024-11-23T17:08:36,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742079_1255 (size=4188619) 2024-11-23T17:08:36,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742080_1256 (size=127628) 2024-11-23T17:08:36,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742080_1256 (size=127628) 2024-11-23T17:08:36,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742080_1256 (size=127628) 2024-11-23T17:08:36,179 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-23T17:08:36,180 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-11-23T17:08:36,182 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.6 K 2024-11-23T17:08:36,182 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-11-23T17:08:36,182 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.8 K 2024-11-23T17:08:36,182 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.0 K 2024-11-23T17:08:36,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742081_1257 (size=1035) 2024-11-23T17:08:36,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742081_1257 (size=1035) 2024-11-23T17:08:36,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742081_1257 (size=1035) 2024-11-23T17:08:36,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742082_1258 (size=35) 2024-11-23T17:08:36,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742082_1258 (size=35) 2024-11-23T17:08:36,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742082_1258 (size=35) 2024-11-23T17:08:36,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742083_1259 (size=304085) 2024-11-23T17:08:36,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742083_1259 (size=304085) 2024-11-23T17:08:36,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742083_1259 (size=304085) 2024-11-23T17:08:36,225 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T17:08:36,225 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-23T17:08:36,653 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0004_000001 (auth:SIMPLE) from 127.0.0.1:57800 2024-11-23T17:08:37,549 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:08:42,446 DEBUG [master/1a7c5470b472:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region d3add4368eba27120bde04a6e101d1e6 changed from -1.0 to 0.0, refreshing cache 2024-11-23T17:08:42,446 DEBUG [master/1a7c5470b472:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region c9fe899e89cf835ee0cde66ecc1c55d2 changed from -1.0 to 0.0, refreshing cache 2024-11-23T17:08:42,924 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0004_000001 (auth:SIMPLE) from 127.0.0.1:51876 2024-11-23T17:08:43,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742084_1260 (size=349783) 2024-11-23T17:08:43,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742084_1260 (size=349783) 2024-11-23T17:08:43,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742084_1260 (size=349783) 2024-11-23T17:08:45,164 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0004_000001 (auth:SIMPLE) from 127.0.0.1:55858 2024-11-23T17:08:45,164 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0004_000001 (auth:SIMPLE) from 127.0.0.1:41596 2024-11-23T17:08:46,013 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0004_000001 (auth:SIMPLE) from 127.0.0.1:41612 2024-11-23T17:08:46,014 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0004_000001 (auth:SIMPLE) from 127.0.0.1:55866 2024-11-23T17:08:48,027 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732381541272_0004_01_000006 while processing FINISH_CONTAINERS event 2024-11-23T17:08:50,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742085_1261 (size=8171) 2024-11-23T17:08:50,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742085_1261 (size=8171) 2024-11-23T17:08:50,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742085_1261 (size=8171) 2024-11-23T17:08:51,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742087_1263 (size=14997) 2024-11-23T17:08:51,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742087_1263 (size=14997) 2024-11-23T17:08:51,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742087_1263 (size=14997) 
2024-11-23T17:08:52,106 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_1/usercache/jenkins/appcache/application_1732381541272_0004/container_1732381541272_0004_01_000002/launch_container.sh] 2024-11-23T17:08:52,106 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_1/usercache/jenkins/appcache/application_1732381541272_0004/container_1732381541272_0004_01_000002/container_tokens] 2024-11-23T17:08:52,106 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_1/usercache/jenkins/appcache/application_1732381541272_0004/container_1732381541272_0004_01_000002/sysfs] 2024-11-23T17:08:52,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742088_1264 (size=5918) 2024-11-23T17:08:52,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742088_1264 (size=5918) 2024-11-23T17:08:52,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742088_1264 (size=5918) 2024-11-23T17:08:52,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742089_1265 (size=5102) 2024-11-23T17:08:52,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742089_1265 (size=5102) 2024-11-23T17:08:52,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742089_1265 (size=5102) 2024-11-23T17:08:52,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742086_1262 (size=31748) 2024-11-23T17:08:52,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742086_1262 (size=31748) 2024-11-23T17:08:52,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742086_1262 (size=31748) 2024-11-23T17:08:52,331 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_0/usercache/jenkins/appcache/application_1732381541272_0004/container_1732381541272_0004_01_000004/launch_container.sh] 2024-11-23T17:08:52,331 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_0/usercache/jenkins/appcache/application_1732381541272_0004/container_1732381541272_0004_01_000004/container_tokens] 2024-11-23T17:08:52,331 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_0/usercache/jenkins/appcache/application_1732381541272_0004/container_1732381541272_0004_01_000004/sysfs] 2024-11-23T17:08:52,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742090_1266 (size=466) 2024-11-23T17:08:52,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742090_1266 (size=466) 2024-11-23T17:08:52,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742090_1266 (size=466) 2024-11-23T17:08:52,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742091_1267 (size=31748) 2024-11-23T17:08:52,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742091_1267 (size=31748) 2024-11-23T17:08:52,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742091_1267 (size=31748) 2024-11-23T17:08:52,386 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_2/usercache/jenkins/appcache/application_1732381541272_0004/container_1732381541272_0004_01_000005/launch_container.sh] 2024-11-23T17:08:52,386 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_2/usercache/jenkins/appcache/application_1732381541272_0004/container_1732381541272_0004_01_000005/container_tokens] 2024-11-23T17:08:52,386 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_2/usercache/jenkins/appcache/application_1732381541272_0004/container_1732381541272_0004_01_000005/sysfs] 2024-11-23T17:08:52,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742092_1268 (size=349783) 2024-11-23T17:08:52,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742092_1268 (size=349783) 2024-11-23T17:08:52,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742092_1268 
(size=349783) 2024-11-23T17:08:52,401 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0004_000001 (auth:SIMPLE) from 127.0.0.1:54124 2024-11-23T17:08:52,407 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0004_000001 (auth:SIMPLE) from 127.0.0.1:54126 2024-11-23T17:08:52,412 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0004_000001 (auth:SIMPLE) from 127.0.0.1:34460 2024-11-23T17:08:54,380 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-23T17:08:54,382 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-11-23T17:08:54,393 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemState 2024-11-23T17:08:54,393 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-23T17:08:54,394 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-23T17:08:54,394 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1293158403_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-23T17:08:54,395 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-11-23T17:08:54,395 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-11-23T17:08:54,395 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1293158403_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381714479/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381714479/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-23T17:08:54,395 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381714479/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-11-23T17:08:54,395 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381714479/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-11-23T17:08:54,404 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemState 2024-11-23T17:08:54,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=107, state=RUNNABLE:DISABLE_TABLE_PREPARE, 
hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState 2024-11-23T17:08:54,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-11-23T17:08:54,408 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381734408"}]},"ts":"1732381734408"} 2024-11-23T17:08:54,411 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta 2024-11-23T17:08:54,411 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-11-23T17:08:54,412 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-11-23T17:08:54,414 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d3add4368eba27120bde04a6e101d1e6, UNASSIGN}, {pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c9fe899e89cf835ee0cde66ecc1c55d2, UNASSIGN}] 2024-11-23T17:08:54,415 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c9fe899e89cf835ee0cde66ecc1c55d2, UNASSIGN 2024-11-23T17:08:54,415 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d3add4368eba27120bde04a6e101d1e6, UNASSIGN 2024-11-23T17:08:54,416 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=c9fe899e89cf835ee0cde66ecc1c55d2, regionState=CLOSING, regionLocation=1a7c5470b472,44711,1732381534872 2024-11-23T17:08:54,416 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=d3add4368eba27120bde04a6e101d1e6, regionState=CLOSING, regionLocation=1a7c5470b472,43453,1732381534825 2024-11-23T17:08:54,418 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c9fe899e89cf835ee0cde66ecc1c55d2, UNASSIGN because future has completed 2024-11-23T17:08:54,419 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d3add4368eba27120bde04a6e101d1e6, UNASSIGN because future has completed 2024-11-23T17:08:54,421 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:08:54,421 DEBUG [PEWorker-1 {}] 
assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:08:54,421 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure c9fe899e89cf835ee0cde66ecc1c55d2, server=1a7c5470b472,44711,1732381534872}] 2024-11-23T17:08:54,421 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=112, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure d3add4368eba27120bde04a6e101d1e6, server=1a7c5470b472,43453,1732381534825}] 2024-11-23T17:08:54,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-11-23T17:08:54,575 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(122): Close c9fe899e89cf835ee0cde66ecc1c55d2 2024-11-23T17:08:54,575 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(122): Close d3add4368eba27120bde04a6e101d1e6 2024-11-23T17:08:54,575 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:08:54,575 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:08:54,575 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1722): Closing c9fe899e89cf835ee0cde66ecc1c55d2, disabling compactions & flushes 2024-11-23T17:08:54,575 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1722): Closing d3add4368eba27120bde04a6e101d1e6, disabling compactions & flushes 2024-11-23T17:08:54,575 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1732381712801.d3add4368eba27120bde04a6e101d1e6. 2024-11-23T17:08:54,575 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1732381712801.c9fe899e89cf835ee0cde66ecc1c55d2. 2024-11-23T17:08:54,575 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1732381712801.d3add4368eba27120bde04a6e101d1e6. 2024-11-23T17:08:54,575 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1732381712801.c9fe899e89cf835ee0cde66ecc1c55d2. 2024-11-23T17:08:54,576 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1732381712801.c9fe899e89cf835ee0cde66ecc1c55d2. 
after waiting 0 ms 2024-11-23T17:08:54,576 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1732381712801.d3add4368eba27120bde04a6e101d1e6. after waiting 0 ms 2024-11-23T17:08:54,576 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1732381712801.c9fe899e89cf835ee0cde66ecc1c55d2. 2024-11-23T17:08:54,576 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1732381712801.d3add4368eba27120bde04a6e101d1e6. 2024-11-23T17:08:54,581 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/c9fe899e89cf835ee0cde66ecc1c55d2/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:08:54,582 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:08:54,582 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1732381712801.c9fe899e89cf835ee0cde66ecc1c55d2. 2024-11-23T17:08:54,582 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1676): Region close journal for c9fe899e89cf835ee0cde66ecc1c55d2: Waiting for close lock at 1732381734575Running coprocessor pre-close hooks at 1732381734575Disabling compacts and flushes for region at 1732381734575Disabling writes for close at 1732381734576 (+1 ms)Writing region close event to WAL at 1732381734576Running coprocessor post-close hooks at 1732381734582 (+6 ms)Closed at 1732381734582 2024-11-23T17:08:54,585 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(157): Closed c9fe899e89cf835ee0cde66ecc1c55d2 2024-11-23T17:08:54,586 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=c9fe899e89cf835ee0cde66ecc1c55d2, regionState=CLOSED 2024-11-23T17:08:54,587 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/d3add4368eba27120bde04a6e101d1e6/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:08:54,588 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:08:54,588 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1732381712801.d3add4368eba27120bde04a6e101d1e6. 
2024-11-23T17:08:54,588 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1676): Region close journal for d3add4368eba27120bde04a6e101d1e6: Waiting for close lock at 1732381734575Running coprocessor pre-close hooks at 1732381734575Disabling compacts and flushes for region at 1732381734575Disabling writes for close at 1732381734576 (+1 ms)Writing region close event to WAL at 1732381734577 (+1 ms)Running coprocessor post-close hooks at 1732381734588 (+11 ms)Closed at 1732381734588 2024-11-23T17:08:54,588 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=111, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure c9fe899e89cf835ee0cde66ecc1c55d2, server=1a7c5470b472,44711,1732381534872 because future has completed 2024-11-23T17:08:54,590 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(157): Closed d3add4368eba27120bde04a6e101d1e6 2024-11-23T17:08:54,591 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=d3add4368eba27120bde04a6e101d1e6, regionState=CLOSED 2024-11-23T17:08:54,593 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=111, resume processing ppid=110 2024-11-23T17:08:54,593 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=111, ppid=110, state=SUCCESS, hasLock=false; CloseRegionProcedure c9fe899e89cf835ee0cde66ecc1c55d2, server=1a7c5470b472,44711,1732381534872 in 168 msec 2024-11-23T17:08:54,594 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=110, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=c9fe899e89cf835ee0cde66ecc1c55d2, UNASSIGN in 179 msec 2024-11-23T17:08:54,596 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=112, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure d3add4368eba27120bde04a6e101d1e6, server=1a7c5470b472,43453,1732381534825 because future has completed 2024-11-23T17:08:54,599 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=112, resume processing ppid=109 2024-11-23T17:08:54,599 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=112, ppid=109, state=SUCCESS, hasLock=false; CloseRegionProcedure d3add4368eba27120bde04a6e101d1e6, server=1a7c5470b472,43453,1732381534825 in 176 msec 2024-11-23T17:08:54,600 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=109, resume processing ppid=108 2024-11-23T17:08:54,601 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=109, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d3add4368eba27120bde04a6e101d1e6, UNASSIGN in 185 msec 2024-11-23T17:08:54,602 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=108, resume processing ppid=107 2024-11-23T17:08:54,603 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=108, ppid=107, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 190 msec 2024-11-23T17:08:54,604 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put 
{"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381734603"}]},"ts":"1732381734603"} 2024-11-23T17:08:54,605 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-11-23T17:08:54,605 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to state=DISABLED 2024-11-23T17:08:54,610 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=107, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState in 204 msec 2024-11-23T17:08:54,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-11-23T17:08:54,729 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-23T17:08:54,729 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemState 2024-11-23T17:08:54,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-23T17:08:54,732 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-23T17:08:54,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemState 2024-11-23T17:08:54,733 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=113, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-23T17:08:54,735 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37875 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemState 2024-11-23T17:08:54,737 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/d3add4368eba27120bde04a6e101d1e6 2024-11-23T17:08:54,737 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/c9fe899e89cf835ee0cde66ecc1c55d2 2024-11-23T17:08:54,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-23T17:08:54,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-23T17:08:54,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-23T17:08:54,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-23T17:08:54,740 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-23T17:08:54,740 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-23T17:08:54,740 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-23T17:08:54,740 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-23T17:08:54,741 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/d3add4368eba27120bde04a6e101d1e6/cf, FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/d3add4368eba27120bde04a6e101d1e6/recovered.edits] 2024-11-23T17:08:54,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-23T17:08:54,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-23T17:08:54,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:08:54,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:08:54,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-23T17:08:54,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:08:54,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 
2024-11-23T17:08:54,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:08:54,742 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/c9fe899e89cf835ee0cde66ecc1c55d2/cf, FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/c9fe899e89cf835ee0cde66ecc1c55d2/recovered.edits] 2024-11-23T17:08:54,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=113 2024-11-23T17:08:54,748 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/d3add4368eba27120bde04a6e101d1e6/cf/eded1c0a93214c92992e2c6842e51f82 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportFileSystemState/d3add4368eba27120bde04a6e101d1e6/cf/eded1c0a93214c92992e2c6842e51f82 2024-11-23T17:08:54,749 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/c9fe899e89cf835ee0cde66ecc1c55d2/cf/c57bc021fc0b46fe8cc7c627776f795a to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportFileSystemState/c9fe899e89cf835ee0cde66ecc1c55d2/cf/c57bc021fc0b46fe8cc7c627776f795a 2024-11-23T17:08:54,751 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/d3add4368eba27120bde04a6e101d1e6/recovered.edits/9.seqid to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportFileSystemState/d3add4368eba27120bde04a6e101d1e6/recovered.edits/9.seqid 2024-11-23T17:08:54,752 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/d3add4368eba27120bde04a6e101d1e6 2024-11-23T17:08:54,757 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/c9fe899e89cf835ee0cde66ecc1c55d2/recovered.edits/9.seqid to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportFileSystemState/c9fe899e89cf835ee0cde66ecc1c55d2/recovered.edits/9.seqid 2024-11-23T17:08:54,758 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemState/c9fe899e89cf835ee0cde66ecc1c55d2 2024-11-23T17:08:54,758 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-11-23T17:08:54,759 
DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d 2024-11-23T17:08:54,760 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf] 2024-11-23T17:08:54,764 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b2024112374ddf72dfaa6484ca33c72ace08fcfa2_c9fe899e89cf835ee0cde66ecc1c55d2 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b2024112374ddf72dfaa6484ca33c72ace08fcfa2_c9fe899e89cf835ee0cde66ecc1c55d2 2024-11-23T17:08:54,765 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e20241123cb49aebc1da94ba3b514088aa9ddb935_d3add4368eba27120bde04a6e101d1e6 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e20241123cb49aebc1da94ba3b514088aa9ddb935_d3add4368eba27120bde04a6e101d1e6 2024-11-23T17:08:54,765 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d 2024-11-23T17:08:54,767 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=113, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-23T17:08:54,770 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-11-23T17:08:54,772 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemState' descriptor. 2024-11-23T17:08:54,773 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=113, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-23T17:08:54,773 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemState' from region states. 
2024-11-23T17:08:54,773 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1732381712801.d3add4368eba27120bde04a6e101d1e6.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732381734773"}]},"ts":"9223372036854775807"} 2024-11-23T17:08:54,773 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1732381712801.c9fe899e89cf835ee0cde66ecc1c55d2.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732381734773"}]},"ts":"9223372036854775807"} 2024-11-23T17:08:54,775 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-23T17:08:54,775 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => d3add4368eba27120bde04a6e101d1e6, NAME => 'testtb-testExportFileSystemState,,1732381712801.d3add4368eba27120bde04a6e101d1e6.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => c9fe899e89cf835ee0cde66ecc1c55d2, NAME => 'testtb-testExportFileSystemState,1,1732381712801.c9fe899e89cf835ee0cde66ecc1c55d2.', STARTKEY => '1', ENDKEY => ''}] 2024-11-23T17:08:54,775 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemState' as deleted. 2024-11-23T17:08:54,775 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732381734775"}]},"ts":"9223372036854775807"} 2024-11-23T17:08:54,777 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemState state from META 2024-11-23T17:08:54,777 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=113, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-23T17:08:54,778 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=113, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState in 48 msec 2024-11-23T17:08:54,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=113 2024-11-23T17:08:54,848 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemState 2024-11-23T17:08:54,848 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-23T17:08:54,854 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemState" type: DISABLED 2024-11-23T17:08:54,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-11-23T17:08:54,858 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemState" type: DISABLED 2024-11-23T17:08:54,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemState 2024-11-23T17:08:54,881 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: 
snapshot.TestMobSecureExportSnapshot#testExportFileSystemState Thread=807 (was 798) Potentially hanging thread: HFileArchiver-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1293158403_22 at /127.0.0.1:54038 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43467 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 7282) 
java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1293158403_22 at /127.0.0.1:44334 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1293158403_22 at /127.0.0.1:39298 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:43467 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1562805255_1 at /127.0.0.1:44300 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-4007 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) 
java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: MoveIntermediateToDone Thread #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=812 (was 813), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=685 (was 597) - SystemLoadAverage LEAK? 
-, ProcessCount=17 (was 17), AvailableMemoryMB=3199 (was 3377) 2024-11-23T17:08:54,881 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=807 is superior to 500 2024-11-23T17:08:54,898 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testConsecutiveExports Thread=807, OpenFileDescriptor=812, MaxFileDescriptor=1048576, SystemLoadAverage=685, ProcessCount=17, AvailableMemoryMB=3196 2024-11-23T17:08:54,898 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=807 is superior to 500 2024-11-23T17:08:54,900 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T17:08:54,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports 2024-11-23T17:08:54,902 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T17:08:54,903 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testConsecutiveExports" procId is: 114 2024-11-23T17:08:54,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-11-23T17:08:54,904 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T17:08:54,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742093_1269 (size=440) 2024-11-23T17:08:54,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742093_1269 (size=440) 2024-11-23T17:08:54,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742093_1269 (size=440) 2024-11-23T17:08:54,917 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ff9440d2c018d6341465fa04c5e3e803, NAME => 'testtb-testConsecutiveExports,,1732381734900.ff9440d2c018d6341465fa04c5e3e803.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', 
REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:08:54,917 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => f33c28667536ddadb93b5f1f7ffa6b91, NAME => 'testtb-testConsecutiveExports,1,1732381734900.f33c28667536ddadb93b5f1f7ffa6b91.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:08:54,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742094_1270 (size=65) 2024-11-23T17:08:54,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742094_1270 (size=65) 2024-11-23T17:08:54,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742094_1270 (size=65) 2024-11-23T17:08:54,942 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1732381734900.ff9440d2c018d6341465fa04c5e3e803.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:08:54,943 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1722): Closing ff9440d2c018d6341465fa04c5e3e803, disabling compactions & flushes 2024-11-23T17:08:54,943 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1732381734900.ff9440d2c018d6341465fa04c5e3e803. 2024-11-23T17:08:54,943 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1732381734900.ff9440d2c018d6341465fa04c5e3e803. 2024-11-23T17:08:54,943 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1732381734900.ff9440d2c018d6341465fa04c5e3e803. after waiting 0 ms 2024-11-23T17:08:54,943 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1732381734900.ff9440d2c018d6341465fa04c5e3e803. 2024-11-23T17:08:54,943 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1732381734900.ff9440d2c018d6341465fa04c5e3e803. 
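The "Potentially hanging thread" dumps and ResourceChecker figures above (Thread=807, OpenFileDescriptor=812, MaxFileDescriptor=1048576, SystemLoadAverage=685) come from the test suite's per-test resource accounting. A minimal, generic sketch of collecting comparable JVM-level numbers with the standard management beans is shown below; this is not the HBase ResourceChecker code itself, and the x100 scaling of the load average is an assumption made to match the logged figure.

import java.lang.management.ManagementFactory;
import java.lang.management.OperatingSystemMXBean;
import java.lang.management.ThreadMXBean;

public final class ResourceSnapshot {
    public static void main(String[] args) {
        ThreadMXBean threads = ManagementFactory.getThreadMXBean();
        OperatingSystemMXBean os = ManagementFactory.getOperatingSystemMXBean();

        // Live thread count, comparable to the "Thread=807" figure in the log.
        System.out.println("Thread=" + threads.getThreadCount());

        // Raw 1-minute system load average; "SystemLoadAverage=685" in the log is
        // presumably this value scaled by 100 (assumption, not stated in the log).
        System.out.println("SystemLoadAverage=" + os.getSystemLoadAverage());

        // Open/max file descriptor counts are only exposed through the
        // JDK-specific com.sun.management interface on Unix-like platforms.
        if (os instanceof com.sun.management.UnixOperatingSystemMXBean) {
            com.sun.management.UnixOperatingSystemMXBean unixOs =
                (com.sun.management.UnixOperatingSystemMXBean) os;
            System.out.println("OpenFileDescriptor=" + unixOs.getOpenFileDescriptorCount());
            System.out.println("MaxFileDescriptor=" + unixOs.getMaxFileDescriptorCount());
        }
    }
}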
2024-11-23T17:08:54,943 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1676): Region close journal for ff9440d2c018d6341465fa04c5e3e803: Waiting for close lock at 1732381734943Disabling compacts and flushes for region at 1732381734943Disabling writes for close at 1732381734943Writing region close event to WAL at 1732381734943Closed at 1732381734943 2024-11-23T17:08:54,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742095_1271 (size=65) 2024-11-23T17:08:54,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742095_1271 (size=65) 2024-11-23T17:08:54,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742095_1271 (size=65) 2024-11-23T17:08:54,945 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1732381734900.f33c28667536ddadb93b5f1f7ffa6b91.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:08:54,945 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1722): Closing f33c28667536ddadb93b5f1f7ffa6b91, disabling compactions & flushes 2024-11-23T17:08:54,945 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1732381734900.f33c28667536ddadb93b5f1f7ffa6b91. 2024-11-23T17:08:54,945 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1732381734900.f33c28667536ddadb93b5f1f7ffa6b91. 2024-11-23T17:08:54,945 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1732381734900.f33c28667536ddadb93b5f1f7ffa6b91. after waiting 0 ms 2024-11-23T17:08:54,945 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1732381734900.f33c28667536ddadb93b5f1f7ffa6b91. 2024-11-23T17:08:54,945 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1732381734900.f33c28667536ddadb93b5f1f7ffa6b91. 
2024-11-23T17:08:54,945 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1676): Region close journal for f33c28667536ddadb93b5f1f7ffa6b91: Waiting for close lock at 1732381734945Disabling compacts and flushes for region at 1732381734945Disabling writes for close at 1732381734945Writing region close event to WAL at 1732381734945Closed at 1732381734945 2024-11-23T17:08:54,946 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T17:08:54,946 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1732381734900.ff9440d2c018d6341465fa04c5e3e803.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732381734946"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732381734946"}]},"ts":"1732381734946"} 2024-11-23T17:08:54,947 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1732381734900.f33c28667536ddadb93b5f1f7ffa6b91.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732381734946"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732381734946"}]},"ts":"1732381734946"} 2024-11-23T17:08:54,949 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-23T17:08:54,950 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T17:08:54,950 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381734950"}]},"ts":"1732381734950"} 2024-11-23T17:08:54,951 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-11-23T17:08:54,952 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {1a7c5470b472=0} racks are {/default-rack=0} 2024-11-23T17:08:54,953 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T17:08:54,953 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T17:08:54,953 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T17:08:54,953 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T17:08:54,953 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T17:08:54,953 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T17:08:54,953 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T17:08:54,953 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T17:08:54,953 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T17:08:54,953 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T17:08:54,953 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ff9440d2c018d6341465fa04c5e3e803, ASSIGN}, {pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=f33c28667536ddadb93b5f1f7ffa6b91, ASSIGN}] 2024-11-23T17:08:54,954 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=f33c28667536ddadb93b5f1f7ffa6b91, ASSIGN 2024-11-23T17:08:54,954 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ff9440d2c018d6341465fa04c5e3e803, ASSIGN 2024-11-23T17:08:54,955 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ff9440d2c018d6341465fa04c5e3e803, ASSIGN; state=OFFLINE, location=1a7c5470b472,37875,1732381534675; forceNewPlan=false, retain=false 2024-11-23T17:08:54,955 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=f33c28667536ddadb93b5f1f7ffa6b91, ASSIGN; state=OFFLINE, location=1a7c5470b472,43453,1732381534825; forceNewPlan=false, retain=false 2024-11-23T17:08:55,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-11-23T17:08:55,105 INFO [1a7c5470b472:46159 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
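The CreateTableProcedure trace above (pid=114) builds 'testtb-testConsecutiveExports' with a single MOB-enabled family 'cf' (MOB_THRESHOLD => '0', VERSIONS => '1', BLOOMFILTER => 'ROW') and two regions split at '1'. A minimal sketch of issuing an equivalent create through the HBase client API follows; only the attributes visible in the log are set explicitly, everything else is left at defaults, and the class name and connection setup are illustrative rather than taken from the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateMobTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // cluster settings assumed to be on the classpath
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            TableName table = TableName.valueOf("testtb-testConsecutiveExports");
            // Column family 'cf' mirroring the logged descriptor: MOB enabled,
            // threshold 0, one version, ROW bloom filter.
            TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(table)
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                    .setMobEnabled(true)
                    .setMobThreshold(0L)
                    .setMaxVersions(1)
                    .setBloomFilterType(BloomType.ROW)
                    .build());
            // A single split key of '1' yields the two regions seen in the log
            // (STARTKEY ''..'1' and '1'..'').
            admin.createTable(td.build(), new byte[][] { Bytes.toBytes("1") });
        }
    }
}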
2024-11-23T17:08:55,106 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=f33c28667536ddadb93b5f1f7ffa6b91, regionState=OPENING, regionLocation=1a7c5470b472,43453,1732381534825 2024-11-23T17:08:55,106 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=ff9440d2c018d6341465fa04c5e3e803, regionState=OPENING, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:08:55,108 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=f33c28667536ddadb93b5f1f7ffa6b91, ASSIGN because future has completed 2024-11-23T17:08:55,108 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure f33c28667536ddadb93b5f1f7ffa6b91, server=1a7c5470b472,43453,1732381534825}] 2024-11-23T17:08:55,109 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ff9440d2c018d6341465fa04c5e3e803, ASSIGN because future has completed 2024-11-23T17:08:55,109 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=118, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure ff9440d2c018d6341465fa04c5e3e803, server=1a7c5470b472,37875,1732381534675}] 2024-11-23T17:08:55,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-11-23T17:08:55,263 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,1,1732381734900.f33c28667536ddadb93b5f1f7ffa6b91. 2024-11-23T17:08:55,263 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7752): Opening region: {ENCODED => f33c28667536ddadb93b5f1f7ffa6b91, NAME => 'testtb-testConsecutiveExports,1,1732381734900.f33c28667536ddadb93b5f1f7ffa6b91.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T17:08:55,264 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1732381734900.f33c28667536ddadb93b5f1f7ffa6b91. service=AccessControlService 2024-11-23T17:08:55,264 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T17:08:55,264 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports f33c28667536ddadb93b5f1f7ffa6b91 2024-11-23T17:08:55,264 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1732381734900.f33c28667536ddadb93b5f1f7ffa6b91.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:08:55,264 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7794): checking encryption for f33c28667536ddadb93b5f1f7ffa6b91 2024-11-23T17:08:55,264 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7797): checking classloading for f33c28667536ddadb93b5f1f7ffa6b91 2024-11-23T17:08:55,265 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,,1732381734900.ff9440d2c018d6341465fa04c5e3e803. 2024-11-23T17:08:55,265 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7752): Opening region: {ENCODED => ff9440d2c018d6341465fa04c5e3e803, NAME => 'testtb-testConsecutiveExports,,1732381734900.ff9440d2c018d6341465fa04c5e3e803.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T17:08:55,265 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,,1732381734900.ff9440d2c018d6341465fa04c5e3e803. service=AccessControlService 2024-11-23T17:08:55,265 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T17:08:55,265 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports ff9440d2c018d6341465fa04c5e3e803 2024-11-23T17:08:55,265 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1732381734900.ff9440d2c018d6341465fa04c5e3e803.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:08:55,265 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7794): checking encryption for ff9440d2c018d6341465fa04c5e3e803 2024-11-23T17:08:55,266 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7797): checking classloading for ff9440d2c018d6341465fa04c5e3e803 2024-11-23T17:08:55,266 INFO [StoreOpener-f33c28667536ddadb93b5f1f7ffa6b91-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f33c28667536ddadb93b5f1f7ffa6b91 2024-11-23T17:08:55,267 INFO [StoreOpener-ff9440d2c018d6341465fa04c5e3e803-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ff9440d2c018d6341465fa04c5e3e803 2024-11-23T17:08:55,267 INFO [StoreOpener-f33c28667536ddadb93b5f1f7ffa6b91-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f33c28667536ddadb93b5f1f7ffa6b91 columnFamilyName cf 2024-11-23T17:08:55,268 INFO [StoreOpener-ff9440d2c018d6341465fa04c5e3e803-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ff9440d2c018d6341465fa04c5e3e803 columnFamilyName cf 2024-11-23T17:08:55,268 DEBUG [StoreOpener-f33c28667536ddadb93b5f1f7ffa6b91-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:08:55,269 DEBUG [StoreOpener-ff9440d2c018d6341465fa04c5e3e803-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:08:55,269 INFO [StoreOpener-f33c28667536ddadb93b5f1f7ffa6b91-1 {}] regionserver.HStore(327): Store=f33c28667536ddadb93b5f1f7ffa6b91/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:08:55,269 INFO [StoreOpener-ff9440d2c018d6341465fa04c5e3e803-1 {}] regionserver.HStore(327): Store=ff9440d2c018d6341465fa04c5e3e803/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:08:55,269 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1038): replaying wal for f33c28667536ddadb93b5f1f7ffa6b91 2024-11-23T17:08:55,269 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1038): replaying wal for ff9440d2c018d6341465fa04c5e3e803 2024-11-23T17:08:55,270 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/ff9440d2c018d6341465fa04c5e3e803 2024-11-23T17:08:55,270 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/f33c28667536ddadb93b5f1f7ffa6b91 2024-11-23T17:08:55,270 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/f33c28667536ddadb93b5f1f7ffa6b91 2024-11-23T17:08:55,270 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/ff9440d2c018d6341465fa04c5e3e803 2024-11-23T17:08:55,271 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1048): stopping wal replay for f33c28667536ddadb93b5f1f7ffa6b91 2024-11-23T17:08:55,271 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1060): Cleaning up temporary data for f33c28667536ddadb93b5f1f7ffa6b91 2024-11-23T17:08:55,271 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1048): stopping wal replay for ff9440d2c018d6341465fa04c5e3e803 2024-11-23T17:08:55,271 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1060): Cleaning up temporary data for ff9440d2c018d6341465fa04c5e3e803 2024-11-23T17:08:55,272 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 
{event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1093): writing seq id for ff9440d2c018d6341465fa04c5e3e803 2024-11-23T17:08:55,273 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1093): writing seq id for f33c28667536ddadb93b5f1f7ffa6b91 2024-11-23T17:08:55,274 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/ff9440d2c018d6341465fa04c5e3e803/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:08:55,275 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/f33c28667536ddadb93b5f1f7ffa6b91/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:08:55,275 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1114): Opened ff9440d2c018d6341465fa04c5e3e803; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71747221, jitterRate=0.06911690533161163}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:08:55,275 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ff9440d2c018d6341465fa04c5e3e803 2024-11-23T17:08:55,275 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1114): Opened f33c28667536ddadb93b5f1f7ffa6b91; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63945614, jitterRate=-0.0471360981464386}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:08:55,275 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f33c28667536ddadb93b5f1f7ffa6b91 2024-11-23T17:08:55,275 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1006): Region open journal for ff9440d2c018d6341465fa04c5e3e803: Running coprocessor pre-open hook at 1732381735266Writing region info on filesystem at 1732381735266Initializing all the Stores at 1732381735266Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381735266Cleaning up temporary data from old regions at 1732381735271 (+5 ms)Running coprocessor post-open hooks at 1732381735275 (+4 ms)Region opened successfully at 1732381735275 2024-11-23T17:08:55,275 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1006): Region open journal for f33c28667536ddadb93b5f1f7ffa6b91: Running coprocessor pre-open hook at 1732381735265Writing region info on filesystem at 
1732381735265Initializing all the Stores at 1732381735265Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381735265Cleaning up temporary data from old regions at 1732381735271 (+6 ms)Running coprocessor post-open hooks at 1732381735275 (+4 ms)Region opened successfully at 1732381735275 2024-11-23T17:08:55,276 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,,1732381734900.ff9440d2c018d6341465fa04c5e3e803., pid=118, masterSystemTime=1732381735261 2024-11-23T17:08:55,276 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,1,1732381734900.f33c28667536ddadb93b5f1f7ffa6b91., pid=117, masterSystemTime=1732381735260 2024-11-23T17:08:55,284 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,,1732381734900.ff9440d2c018d6341465fa04c5e3e803. 2024-11-23T17:08:55,284 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,,1732381734900.ff9440d2c018d6341465fa04c5e3e803. 2024-11-23T17:08:55,297 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=ff9440d2c018d6341465fa04c5e3e803, regionState=OPEN, openSeqNum=2, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:08:55,299 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=f33c28667536ddadb93b5f1f7ffa6b91, regionState=OPEN, openSeqNum=2, regionLocation=1a7c5470b472,43453,1732381534825 2024-11-23T17:08:55,299 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,1,1732381734900.f33c28667536ddadb93b5f1f7ffa6b91. 2024-11-23T17:08:55,299 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,1,1732381734900.f33c28667536ddadb93b5f1f7ffa6b91. 2024-11-23T17:08:55,303 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=118, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure ff9440d2c018d6341465fa04c5e3e803, server=1a7c5470b472,37875,1732381534675 because future has completed 2024-11-23T17:08:55,305 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46159 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=1a7c5470b472,37875,1732381534675, table=testtb-testConsecutiveExports, region=ff9440d2c018d6341465fa04c5e3e803. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
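The CompactionConfiguration line logged when each store opened (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000, off-peak ratio 5.000000, throttle point 2684354560, major period 604800000) reflects cluster configuration values. The sketch below maps those figures to the configuration keys believed to drive them; the key-to-value mapping is an assumption based on common HBase defaults and is not stated in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class CompactionTuning {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed correspondence to the logged CompactionConfiguration values:
        // minFilesToCompact:3        -> hbase.hstore.compaction.min
        // maxFilesToCompact:10       -> hbase.hstore.compaction.max
        // ratio 1.2 / off-peak 5.0   -> hbase.hstore.compaction.ratio(.offpeak)
        // throttle point 2684354560  -> hbase.regionserver.thread.compaction.throttle
        // major period 604800000 ms  -> hbase.hregion.majorcompaction
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        conf.setLong("hbase.regionserver.thread.compaction.throttle", 2684354560L);
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);
        System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
    }
}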
2024-11-23T17:08:55,306 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=117, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure f33c28667536ddadb93b5f1f7ffa6b91, server=1a7c5470b472,43453,1732381534825 because future has completed 2024-11-23T17:08:55,310 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=118, resume processing ppid=115 2024-11-23T17:08:55,310 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=118, ppid=115, state=SUCCESS, hasLock=false; OpenRegionProcedure ff9440d2c018d6341465fa04c5e3e803, server=1a7c5470b472,37875,1732381534675 in 197 msec 2024-11-23T17:08:55,317 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=115, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ff9440d2c018d6341465fa04c5e3e803, ASSIGN in 357 msec 2024-11-23T17:08:55,317 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=117, resume processing ppid=116 2024-11-23T17:08:55,318 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=117, ppid=116, state=SUCCESS, hasLock=false; OpenRegionProcedure f33c28667536ddadb93b5f1f7ffa6b91, server=1a7c5470b472,43453,1732381534825 in 201 msec 2024-11-23T17:08:55,320 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=116, resume processing ppid=114 2024-11-23T17:08:55,320 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=116, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=f33c28667536ddadb93b5f1f7ffa6b91, ASSIGN in 365 msec 2024-11-23T17:08:55,321 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T17:08:55,321 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381735321"}]},"ts":"1732381735321"} 2024-11-23T17:08:55,323 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 2024-11-23T17:08:55,324 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T17:08:55,325 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testConsecutiveExports jenkins: RWXCA 2024-11-23T17:08:55,328 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37875 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-23T17:08:55,332 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:08:55,332 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:08:55,332 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:08:55,332 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:08:55,334 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-23T17:08:55,334 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-23T17:08:55,335 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-23T17:08:55,335 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-23T17:08:55,336 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=114, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports in 434 msec 2024-11-23T17:08:55,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-11-23T17:08:55,528 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-23T17:08:55,528 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-23T17:08:55,532 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-11-23T17:08:55,532 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1732381734900.ff9440d2c018d6341465fa04c5e3e803. 
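HBaseTestingUtil reports "Found 2 regions for table testtb-testConsecutiveExports" just below, after scanning hbase:meta. A small sketch of listing the same region-to-server mapping from a client with a RegionLocator (connection setup assumed, class name illustrative):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public final class ListRegionLocations {
    public static void main(String[] args) throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator =
                 connection.getRegionLocator(TableName.valueOf("testtb-testConsecutiveExports"))) {
            // Prints each region name and its hosting server, e.g. the two regions
            // assigned to the region servers on ports 37875 and 43453 in the log.
            for (HRegionLocation location : locator.getAllRegionLocations()) {
                System.out.println(location.getRegion().getRegionNameAsString()
                    + " -> " + location.getServerName());
            }
        }
    }
}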
2024-11-23T17:08:55,532 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:08:55,534 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-23T17:08:55,539 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-23T17:08:55,545 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-23T17:08:55,547 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-23T17:08:55,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732381735547 (current time:1732381735547). 2024-11-23T17:08:55,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:08:55,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-11-23T17:08:55,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:08:55,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c6dd23d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:55,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:08:55,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:08:55,548 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:08:55,549 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:08:55,549 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:08:55,549 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d3a03b7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
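The PermissionStorage and ZKPermissionWatcher entries above record the "jenkins: RWXCA" ACL written for the new table by the AccessController post-create hook. For reference, an equivalent table-scope grant could be issued explicitly with AccessControlClient; this sketch is not what the test does, only the client-side analogue, and the connection setup is assumed.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.UserPermission;

public final class GrantTablePermissions {
    public static void main(String[] args) throws Throwable {
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
            TableName table = TableName.valueOf("testtb-testConsecutiveExports");
            // RWXCA, as in the logged acl entry "jenkins: RWXCA"; family and
            // qualifier are null so the grant applies at table scope.
            AccessControlClient.grant(connection, table, "jenkins", null, null,
                Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
                Permission.Action.CREATE, Permission.Action.ADMIN);
            // List what is now stored for the table in hbase:acl.
            for (UserPermission perm :
                AccessControlClient.getUserPermissions(connection, table.getNameAsString())) {
                System.out.println(perm);
            }
        }
    }
}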
2024-11-23T17:08:55,549 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:08:55,549 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:08:55,549 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:55,550 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56596, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:08:55,551 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46128625, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:55,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:08:55,551 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:08:55,552 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:08:55,553 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46478, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:08:55,554 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159. 
2024-11-23T17:08:55,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:08:55,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:55,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:55,554 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:08:55,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40685b3d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:55,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:08:55,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:08:55,556 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:08:55,556 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:08:55,556 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:08:55,556 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@505a3e57, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:55,556 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:08:55,556 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:08:55,557 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:55,558 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56620, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:08:55,558 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@383b9175, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:55,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:08:55,560 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:08:55,560 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:08:55,561 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46484, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:08:55,563 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce., hostname=1a7c5470b472,37875,1732381534675, seqNum=2] 2024-11-23T17:08:55,564 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159. 
2024-11-23T17:08:55,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor224.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:08:55,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:55,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:55,564 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:08:55,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-23T17:08:55,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
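At this point the master has validated the snapshot request and SnapshotManager reports "No existing snapshot, attempting snapshot...". The client-side call that triggers this flow is a FLUSH-type snapshot of the online table; a minimal sketch (connection setup assumed) matching the logged request "{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }":

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public final class TakeFlushSnapshot {
    public static void main(String[] args) throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = connection.getAdmin()) {
            // Blocks until the SnapshotProcedure seen in the log (pid=119) completes.
            admin.snapshot("emptySnaptb0-testConsecutiveExports",
                TableName.valueOf("testtb-testConsecutiveExports"),
                SnapshotType.FLUSH);
        }
    }
}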
2024-11-23T17:08:55,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-23T17:08:55,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-11-23T17:08:55,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-23T17:08:55,568 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:08:55,569 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:08:55,571 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:08:55,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742096_1272 (size=161) 2024-11-23T17:08:55,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742096_1272 (size=161) 2024-11-23T17:08:55,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742096_1272 (size=161) 2024-11-23T17:08:55,584 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:08:55,584 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ff9440d2c018d6341465fa04c5e3e803}, {pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f33c28667536ddadb93b5f1f7ffa6b91}] 2024-11-23T17:08:55,585 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f33c28667536ddadb93b5f1f7ffa6b91 2024-11-23T17:08:55,585 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ff9440d2c018d6341465fa04c5e3e803 2024-11-23T17:08:55,654 WARN [ContainersLauncher #0 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_0/usercache/jenkins/appcache/application_1732381541272_0004/container_1732381541272_0004_01_000003/launch_container.sh] 2024-11-23T17:08:55,654 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_0/usercache/jenkins/appcache/application_1732381541272_0004/container_1732381541272_0004_01_000003/container_tokens] 2024-11-23T17:08:55,654 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_0/usercache/jenkins/appcache/application_1732381541272_0004/container_1732381541272_0004_01_000003/sysfs] 2024-11-23T17:08:55,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-23T17:08:55,737 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37875 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=120 2024-11-23T17:08:55,737 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43453 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=121 2024-11-23T17:08:55,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1732381734900.ff9440d2c018d6341465fa04c5e3e803. 2024-11-23T17:08:55,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1732381734900.f33c28667536ddadb93b5f1f7ffa6b91. 2024-11-23T17:08:55,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.HRegion(2603): Flush status journal for f33c28667536ddadb93b5f1f7ffa6b91: 2024-11-23T17:08:55,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.HRegion(2603): Flush status journal for ff9440d2c018d6341465fa04c5e3e803: 2024-11-23T17:08:55,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1732381734900.f33c28667536ddadb93b5f1f7ffa6b91. for emptySnaptb0-testConsecutiveExports completed. 2024-11-23T17:08:55,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1732381734900.ff9440d2c018d6341465fa04c5e3e803. for emptySnaptb0-testConsecutiveExports completed. 
2024-11-23T17:08:55,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1732381734900.f33c28667536ddadb93b5f1f7ffa6b91.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-11-23T17:08:55,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1732381734900.ff9440d2c018d6341465fa04c5e3e803.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-11-23T17:08:55,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:08:55,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:08:55,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:08:55,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:08:55,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742097_1273 (size=68) 2024-11-23T17:08:55,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742097_1273 (size=68) 2024-11-23T17:08:55,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742097_1273 (size=68) 2024-11-23T17:08:55,753 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1732381734900.ff9440d2c018d6341465fa04c5e3e803. 
2024-11-23T17:08:55,753 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=120 2024-11-23T17:08:55,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=120 2024-11-23T17:08:55,754 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region ff9440d2c018d6341465fa04c5e3e803 2024-11-23T17:08:55,754 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ff9440d2c018d6341465fa04c5e3e803 2024-11-23T17:08:55,756 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=120, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ff9440d2c018d6341465fa04c5e3e803 in 171 msec 2024-11-23T17:08:55,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742098_1274 (size=68) 2024-11-23T17:08:55,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742098_1274 (size=68) 2024-11-23T17:08:55,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742098_1274 (size=68) 2024-11-23T17:08:55,763 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1732381734900.f33c28667536ddadb93b5f1f7ffa6b91. 
2024-11-23T17:08:55,763 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-11-23T17:08:55,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=121 2024-11-23T17:08:55,763 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region f33c28667536ddadb93b5f1f7ffa6b91 2024-11-23T17:08:55,763 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f33c28667536ddadb93b5f1f7ffa6b91 2024-11-23T17:08:55,766 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=121, resume processing ppid=119 2024-11-23T17:08:55,766 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=121, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f33c28667536ddadb93b5f1f7ffa6b91 in 180 msec 2024-11-23T17:08:55,766 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:08:55,767 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:08:55,768 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-23T17:08:55,768 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-23T17:08:55,768 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:08:55,768 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-23T17:08:55,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742099_1275 (size=60) 2024-11-23T17:08:55,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742099_1275 (size=60) 2024-11-23T17:08:55,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742099_1275 (size=60) 2024-11-23T17:08:55,778 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:08:55,778 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-11-23T17:08:55,779 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-11-23T17:08:55,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742100_1276 (size=641) 2024-11-23T17:08:55,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742100_1276 (size=641) 2024-11-23T17:08:55,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742100_1276 (size=641) 2024-11-23T17:08:55,817 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:08:55,822 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:08:55,823 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-11-23T17:08:55,824 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_POST_OPERATION, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:08:55,824 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-11-23T17:08:55,826 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=119, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 259 msec 2024-11-23T17:08:55,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-23T17:08:55,889 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-11-23T17:08:55,896 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37875 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,,1732381734900.ff9440d2c018d6341465fa04c5e3e803. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:08:55,897 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43453 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,1,1732381734900.f33c28667536ddadb93b5f1f7ffa6b91. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:08:55,899 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-23T17:08:55,901 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-11-23T17:08:55,901 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1732381734900.ff9440d2c018d6341465fa04c5e3e803. 
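The two "writing data to region ... with WAL disabled" warnings above come from test writes issued with write-ahead logging skipped. A write of that kind is a normal client Put with Durability.SKIP_WAL set; a minimal sketch (table, family and qualifier names taken from this run, row and value illustrative) is:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutSketch {
      static void writeWithoutWal(Connection conn) throws Exception {
        try (Table table = conn.getTable(TableName.valueOf("testtb-testConsecutiveExports"))) {
          Put put = new Put(Bytes.toBytes("row-0"));
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          // Skipping the WAL is what makes the region server log the warning seen above.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }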
2024-11-23T17:08:55,901 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:08:55,903 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-23T17:08:55,908 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-23T17:08:55,913 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-23T17:08:55,915 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-23T17:08:55,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732381735915 (current time:1732381735915). 2024-11-23T17:08:55,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:08:55,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-11-23T17:08:55,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:08:55,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11e7a33b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:55,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:08:55,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:08:55,917 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:08:55,917 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-11-23T17:08:55,917 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:08:55,917 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:08:55,917 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@304c297d, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:55,917 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:08:55,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:08:55,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:55,918 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56632, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:08:55,919 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f46f054, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:55,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:08:55,920 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:08:55,920 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:08:55,921 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46488, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:08:55,922 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159. 
2024-11-23T17:08:55,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:08:55,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:55,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:55,922 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:08:55,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15f64a0c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:55,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:08:55,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:08:55,924 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:08:55,924 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:08:55,924 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:08:55,924 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4fb59f2c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:55,924 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:08:55,924 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:08:55,924 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:55,925 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56646, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:08:55,925 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@276667a0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:08:55,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:08:55,927 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:08:55,927 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:08:55,927 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46500, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:08:55,929 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce., hostname=1a7c5470b472,37875,1732381534675, seqNum=2] 2024-11-23T17:08:55,930 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159. 
2024-11-23T17:08:55,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor224.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:08:55,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:55,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:08:55,930 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:08:55,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-23T17:08:55,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
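The "Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA]" line shows the master picking up a full-permission ACL entry for the table owner and copying it into the snapshot description. An entry like that is typically produced by a grant such as the following sketch (assuming the AccessController coprocessor is enabled; user and table names are the ones appearing in this log):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantSketch {
      static void grantAll(Connection conn) throws Throwable {
        // Produces an hbase:acl entry like "jenkins: RWXCA" for the table,
        // which the master reads back during snapshot validation (see above).
        AccessControlClient.grant(conn,
            TableName.valueOf("testtb-testConsecutiveExports"),
            "jenkins", null, null,
            Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
            Permission.Action.CREATE, Permission.Action.ADMIN);
      }
    }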
2024-11-23T17:08:55,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-23T17:08:55,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-11-23T17:08:55,933 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:08:55,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-11-23T17:08:55,934 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:08:55,936 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:08:55,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742101_1277 (size=156) 2024-11-23T17:08:55,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742101_1277 (size=156) 2024-11-23T17:08:55,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742101_1277 (size=156) 2024-11-23T17:08:55,943 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:08:55,943 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ff9440d2c018d6341465fa04c5e3e803}, {pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f33c28667536ddadb93b5f1f7ffa6b91}] 2024-11-23T17:08:55,944 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ff9440d2c018d6341465fa04c5e3e803 2024-11-23T17:08:55,944 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f33c28667536ddadb93b5f1f7ffa6b91 2024-11-23T17:08:56,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 
{}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-11-23T17:08:56,096 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37875 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=123 2024-11-23T17:08:56,096 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43453 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=124 2024-11-23T17:08:56,097 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1732381734900.f33c28667536ddadb93b5f1f7ffa6b91. 2024-11-23T17:08:56,097 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2902): Flushing f33c28667536ddadb93b5f1f7ffa6b91 1/1 column families, dataSize=2.80 KB heapSize=6.30 KB 2024-11-23T17:08:56,097 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1732381734900.ff9440d2c018d6341465fa04c5e3e803. 2024-11-23T17:08:56,097 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2902): Flushing ff9440d2c018d6341465fa04c5e3e803 1/1 column families, dataSize=467 B heapSize=1.23 KB 2024-11-23T17:08:56,117 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411236fb0a244f3a64dbd9ae42eadd6503c6b_ff9440d2c018d6341465fa04c5e3e803 is 71, key is 01b1ddcc2c665175b8925af78e652466/cf:q/1732381735896/Put/seqid=0 2024-11-23T17:08:56,128 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411233b171caece614998b092eedbb8575631_f33c28667536ddadb93b5f1f7ffa6b91 is 71, key is 20943505c8f2338979fb023f4f1f75be/cf:q/1732381735897/Put/seqid=0 2024-11-23T17:08:56,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742102_1278 (size=5381) 2024-11-23T17:08:56,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742102_1278 (size=5381) 2024-11-23T17:08:56,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742102_1278 (size=5381) 2024-11-23T17:08:56,131 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:08:56,138 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411236fb0a244f3a64dbd9ae42eadd6503c6b_ff9440d2c018d6341465fa04c5e3e803 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202411236fb0a244f3a64dbd9ae42eadd6503c6b_ff9440d2c018d6341465fa04c5e3e803 2024-11-23T17:08:56,139 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/ff9440d2c018d6341465fa04c5e3e803/.tmp/cf/2c60132add8443f2ac5db06c4c1b63e5, store: [table=testtb-testConsecutiveExports family=cf region=ff9440d2c018d6341465fa04c5e3e803] 2024-11-23T17:08:56,140 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/ff9440d2c018d6341465fa04c5e3e803/.tmp/cf/2c60132add8443f2ac5db06c4c1b63e5 is 206, key is 0dd587aa20ff952cffffd253dfcb1518f/cf:q/1732381735896/Put/seqid=0 2024-11-23T17:08:56,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742103_1279 (size=7892) 2024-11-23T17:08:56,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742103_1279 (size=7892) 2024-11-23T17:08:56,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742103_1279 (size=7892) 2024-11-23T17:08:56,148 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:08:56,154 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411233b171caece614998b092eedbb8575631_f33c28667536ddadb93b5f1f7ffa6b91 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b202411233b171caece614998b092eedbb8575631_f33c28667536ddadb93b5f1f7ffa6b91 2024-11-23T17:08:56,155 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/f33c28667536ddadb93b5f1f7ffa6b91/.tmp/cf/0eb405eac3da4b37acc39065c648d2e4, store: [table=testtb-testConsecutiveExports family=cf region=f33c28667536ddadb93b5f1f7ffa6b91] 2024-11-23T17:08:56,155 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/f33c28667536ddadb93b5f1f7ffa6b91/.tmp/cf/0eb405eac3da4b37acc39065c648d2e4 is 206, key is 166c2d4720d844c453dedce7ade9b1f81/cf:q/1732381735897/Put/seqid=0 2024-11-23T17:08:56,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742104_1280 (size=6720) 2024-11-23T17:08:56,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742104_1280 (size=6720) 2024-11-23T17:08:56,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742104_1280 (size=6720) 2024-11-23T17:08:56,157 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=467, hasBloomFilter=true, into tmp file hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/ff9440d2c018d6341465fa04c5e3e803/.tmp/cf/2c60132add8443f2ac5db06c4c1b63e5 2024-11-23T17:08:56,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742105_1281 (size=14047) 2024-11-23T17:08:56,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742105_1281 (size=14047) 2024-11-23T17:08:56,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742105_1281 (size=14047) 2024-11-23T17:08:56,163 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/ff9440d2c018d6341465fa04c5e3e803/.tmp/cf/2c60132add8443f2ac5db06c4c1b63e5 as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/ff9440d2c018d6341465fa04c5e3e803/cf/2c60132add8443f2ac5db06c4c1b63e5 2024-11-23T17:08:56,168 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/ff9440d2c018d6341465fa04c5e3e803/cf/2c60132add8443f2ac5db06c4c1b63e5, entries=7, sequenceid=6, filesize=6.6 K 2024-11-23T17:08:56,169 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(3140): Finished flush of dataSize ~467 B/467, heapSize ~1.22 KB/1248, currentSize=0 B/0 for ff9440d2c018d6341465fa04c5e3e803 in 72ms, sequenceid=6, compaction requested=false 2024-11-23T17:08:56,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2603): Flush status journal for ff9440d2c018d6341465fa04c5e3e803: 2024-11-23T17:08:56,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(75): Snapshotting region 
testtb-testConsecutiveExports,,1732381734900.ff9440d2c018d6341465fa04c5e3e803. for snaptb0-testConsecutiveExports completed. 2024-11-23T17:08:56,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1732381734900.ff9440d2c018d6341465fa04c5e3e803.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-11-23T17:08:56,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:08:56,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/ff9440d2c018d6341465fa04c5e3e803/cf/2c60132add8443f2ac5db06c4c1b63e5] hfiles 2024-11-23T17:08:56,169 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/ff9440d2c018d6341465fa04c5e3e803/cf/2c60132add8443f2ac5db06c4c1b63e5 for snapshot=snaptb0-testConsecutiveExports 2024-11-23T17:08:56,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742106_1282 (size=107) 2024-11-23T17:08:56,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742106_1282 (size=107) 2024-11-23T17:08:56,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742106_1282 (size=107) 2024-11-23T17:08:56,176 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1732381734900.ff9440d2c018d6341465fa04c5e3e803. 
2024-11-23T17:08:56,176 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-11-23T17:08:56,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=123 2024-11-23T17:08:56,177 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region ff9440d2c018d6341465fa04c5e3e803 2024-11-23T17:08:56,177 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ff9440d2c018d6341465fa04c5e3e803 2024-11-23T17:08:56,179 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=123, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ff9440d2c018d6341465fa04c5e3e803 in 235 msec 2024-11-23T17:08:56,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-11-23T17:08:56,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-11-23T17:08:56,563 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/f33c28667536ddadb93b5f1f7ffa6b91/.tmp/cf/0eb405eac3da4b37acc39065c648d2e4 2024-11-23T17:08:56,570 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/f33c28667536ddadb93b5f1f7ffa6b91/.tmp/cf/0eb405eac3da4b37acc39065c648d2e4 as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/f33c28667536ddadb93b5f1f7ffa6b91/cf/0eb405eac3da4b37acc39065c648d2e4 2024-11-23T17:08:56,577 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/f33c28667536ddadb93b5f1f7ffa6b91/cf/0eb405eac3da4b37acc39065c648d2e4, entries=43, sequenceid=6, filesize=13.7 K 2024-11-23T17:08:56,578 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(3140): Finished flush of dataSize ~2.80 KB/2869, heapSize ~6.28 KB/6432, currentSize=0 B/0 for f33c28667536ddadb93b5f1f7ffa6b91 in 481ms, sequenceid=6, compaction requested=false 2024-11-23T17:08:56,578 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2603): Flush status journal for f33c28667536ddadb93b5f1f7ffa6b91: 2024-11-23T17:08:56,578 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(75): Snapshotting region 
testtb-testConsecutiveExports,1,1732381734900.f33c28667536ddadb93b5f1f7ffa6b91. for snaptb0-testConsecutiveExports completed. 2024-11-23T17:08:56,578 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1732381734900.f33c28667536ddadb93b5f1f7ffa6b91.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-11-23T17:08:56,578 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:08:56,578 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/f33c28667536ddadb93b5f1f7ffa6b91/cf/0eb405eac3da4b37acc39065c648d2e4] hfiles 2024-11-23T17:08:56,578 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/f33c28667536ddadb93b5f1f7ffa6b91/cf/0eb405eac3da4b37acc39065c648d2e4 for snapshot=snaptb0-testConsecutiveExports 2024-11-23T17:08:56,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742107_1283 (size=107) 2024-11-23T17:08:56,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742107_1283 (size=107) 2024-11-23T17:08:56,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742107_1283 (size=107) 2024-11-23T17:08:56,587 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1732381734900.f33c28667536ddadb93b5f1f7ffa6b91. 
2024-11-23T17:08:56,587 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=124 2024-11-23T17:08:56,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=124 2024-11-23T17:08:56,588 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region f33c28667536ddadb93b5f1f7ffa6b91 2024-11-23T17:08:56,589 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f33c28667536ddadb93b5f1f7ffa6b91 2024-11-23T17:08:56,593 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=124, resume processing ppid=122 2024-11-23T17:08:56,593 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=124, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f33c28667536ddadb93b5f1f7ffa6b91 in 647 msec 2024-11-23T17:08:56,593 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:08:56,596 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:08:56,597 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
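The MobRegionSnapshotPool entries here and below snapshot the table's MOB region: MOB cells are flushed to separate files under mobdir, and the snapshot manifest records references to them alongside the regular hfiles. MOB storage is switched on per column family when the table is created; a minimal sketch (threshold value illustrative, not taken from this run) is:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobTableSketch {
      static void createMobTable(Admin admin) throws Exception {
        // MOB is enabled per column family; cells larger than the threshold are
        // written to the separate mobdir files referenced in the manifest above.
        admin.createTable(
            TableDescriptorBuilder.newBuilder(TableName.valueOf("testtb-testConsecutiveExports"))
                .setColumnFamily(
                    ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                        .setMobEnabled(true)
                        .setMobThreshold(0L) // illustrative; a threshold of 0 pushes every cell into MOB files
                        .build())
                .build());
      }
    }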
2024-11-23T17:08:56,597 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-23T17:08:56,597 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:08:56,599 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b202411233b171caece614998b092eedbb8575631_f33c28667536ddadb93b5f1f7ffa6b91, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202411236fb0a244f3a64dbd9ae42eadd6503c6b_ff9440d2c018d6341465fa04c5e3e803] hfiles 2024-11-23T17:08:56,599 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b202411233b171caece614998b092eedbb8575631_f33c28667536ddadb93b5f1f7ffa6b91 2024-11-23T17:08:56,599 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202411236fb0a244f3a64dbd9ae42eadd6503c6b_ff9440d2c018d6341465fa04c5e3e803 2024-11-23T17:08:56,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742108_1284 (size=291) 2024-11-23T17:08:56,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742108_1284 (size=291) 2024-11-23T17:08:56,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742108_1284 (size=291) 2024-11-23T17:08:56,614 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:08:56,614 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports 2024-11-23T17:08:56,615 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-23T17:08:56,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742109_1285 (size=951) 2024-11-23T17:08:56,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742109_1285 (size=951) 2024-11-23T17:08:56,626 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742109_1285 (size=951) 2024-11-23T17:08:56,633 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:08:56,642 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:08:56,643 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-23T17:08:56,644 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:08:56,644 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-11-23T17:08:56,646 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=122, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 713 msec 2024-11-23T17:08:57,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-11-23T17:08:57,068 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-11-23T17:08:57,068 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381737068 2024-11-23T17:08:57,068 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381737068, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381737068, srcFsUri=hdfs://localhost:35431, srcDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:08:57,099 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:35431, inputRoot=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 
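Note: the ExportSnapshot parameters logged here (inputFs/inputRoot on HDFS, outputFs/outputRoot on the local filesystem, skipTmp=false) describe an export of snaptb0-testConsecutiveExports into a local directory. A hedged sketch of driving the same tool programmatically is shown below, assuming ExportSnapshot can be run as a standard Hadoop Tool through ToolRunner; the -copy-to URI and mapper count are placeholders, not values from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    // Illustrative sketch; the destination URI is a placeholder, not the test's path.
    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testConsecutiveExports",
            "-copy-to", "file:///tmp/local-export",   // placeholder destination
            "-mappers", "4"
        });
        System.exit(rc);
      }
    }

The equivalent command-line form documented in the HBase reference guide is: hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot <name> -copy-to <uri>.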
2024-11-23T17:08:57,099 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@2b2fb4ec, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381737068, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381737068/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-23T17:08:57,101 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-23T17:08:57,106 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381737068/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-23T17:08:57,151 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:08:57,152 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:08:57,152 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:08:58,212 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop-350089205806850650.jar 2024-11-23T17:08:58,212 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:08:58,212 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:08:58,278 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop-4717594127811366541.jar 2024-11-23T17:08:58,278 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:08:58,279 
DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:08:58,279 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:08:58,279 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:08:58,279 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:08:58,279 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:08:58,280 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-23T17:08:58,280 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-23T17:08:58,280 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-23T17:08:58,280 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-23T17:08:58,281 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-23T17:08:58,281 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-23T17:08:58,281 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-23T17:08:58,281 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-23T17:08:58,281 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-23T17:08:58,282 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-23T17:08:58,282 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-23T17:08:58,282 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:08:58,283 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:08:58,283 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:08:58,283 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:08:58,283 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:08:58,283 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:08:58,284 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:08:58,336 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742110_1286 (size=24020) 2024-11-23T17:08:58,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742110_1286 (size=24020) 2024-11-23T17:08:58,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742110_1286 (size=24020) 2024-11-23T17:08:58,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742111_1287 (size=77755) 2024-11-23T17:08:58,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742111_1287 (size=77755) 2024-11-23T17:08:58,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742111_1287 (size=77755) 2024-11-23T17:08:58,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742112_1288 (size=131360) 2024-11-23T17:08:58,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742112_1288 (size=131360) 2024-11-23T17:08:58,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742112_1288 (size=131360) 2024-11-23T17:08:58,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742113_1289 (size=111793) 2024-11-23T17:08:58,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742113_1289 (size=111793) 2024-11-23T17:08:58,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742113_1289 (size=111793) 2024-11-23T17:08:58,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742114_1290 (size=1832290) 2024-11-23T17:08:58,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742114_1290 (size=1832290) 2024-11-23T17:08:58,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742114_1290 (size=1832290) 2024-11-23T17:08:58,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742115_1291 (size=8360005) 2024-11-23T17:08:58,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742115_1291 (size=8360005) 2024-11-23T17:08:58,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742115_1291 (size=8360005) 2024-11-23T17:08:58,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742116_1292 (size=440956) 2024-11-23T17:08:58,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742116_1292 (size=440956) 2024-11-23T17:08:58,416 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742116_1292 (size=440956) 2024-11-23T17:08:58,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742117_1293 (size=503880) 2024-11-23T17:08:58,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742117_1293 (size=503880) 2024-11-23T17:08:58,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742117_1293 (size=503880) 2024-11-23T17:08:58,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742118_1294 (size=322274) 2024-11-23T17:08:58,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742118_1294 (size=322274) 2024-11-23T17:08:58,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742118_1294 (size=322274) 2024-11-23T17:08:58,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742119_1295 (size=20406) 2024-11-23T17:08:58,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742119_1295 (size=20406) 2024-11-23T17:08:58,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742119_1295 (size=20406) 2024-11-23T17:08:58,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742120_1296 (size=45609) 2024-11-23T17:08:58,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742120_1296 (size=45609) 2024-11-23T17:08:58,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742120_1296 (size=45609) 2024-11-23T17:08:58,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742121_1297 (size=136454) 2024-11-23T17:08:58,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742121_1297 (size=136454) 2024-11-23T17:08:58,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742121_1297 (size=136454) 2024-11-23T17:08:58,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742122_1298 (size=1597270) 2024-11-23T17:08:58,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742122_1298 (size=1597270) 2024-11-23T17:08:58,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742122_1298 (size=1597270) 2024-11-23T17:08:58,485 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0004_000001 (auth:SIMPLE) from 127.0.0.1:34472 2024-11-23T17:08:58,492 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742123_1299 (size=30873) 2024-11-23T17:08:58,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742123_1299 (size=30873) 2024-11-23T17:08:58,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742123_1299 (size=30873) 2024-11-23T17:08:58,498 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_0/usercache/jenkins/appcache/application_1732381541272_0004/container_1732381541272_0004_01_000001/launch_container.sh] 2024-11-23T17:08:58,498 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_0/usercache/jenkins/appcache/application_1732381541272_0004/container_1732381541272_0004_01_000001/container_tokens] 2024-11-23T17:08:58,498 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_0/usercache/jenkins/appcache/application_1732381541272_0004/container_1732381541272_0004_01_000001/sysfs] 2024-11-23T17:08:58,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742124_1300 (size=29229) 2024-11-23T17:08:58,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742124_1300 (size=29229) 2024-11-23T17:08:58,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742124_1300 (size=29229) 2024-11-23T17:08:58,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742125_1301 (size=903661) 2024-11-23T17:08:58,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742125_1301 (size=903661) 2024-11-23T17:08:58,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742125_1301 (size=903661) 2024-11-23T17:08:58,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742126_1302 (size=6424742) 2024-11-23T17:08:58,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742126_1302 (size=6424742) 2024-11-23T17:08:58,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742126_1302 (size=6424742) 2024-11-23T17:08:58,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742127_1303 (size=5175431) 
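Note: the long run of "For class ..., using jar ..." entries followed by addStoredBlock reports reflects job setup shipping the HBase, ZooKeeper, and Hadoop dependency jars into HDFS so the MapReduce tasks can load them from the distributed cache. A small sketch of the usual TableMapReduceUtil pattern is below; the job name and the idea of calling it directly from user code are assumptions for illustration, since the export tool performs this step internally.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    // Illustrative sketch; "example-job" is a placeholder job name.
    public class DependencyJarsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "example-job");
        // Resolves the jar containing each required class and adds it to the
        // job classpath via the distributed cache, which is what produces the
        // "For class ..., using jar ..." lines seen above.
        TableMapReduceUtil.addDependencyJars(job);
        System.out.println("tmpjars=" + job.getConfiguration().get("tmpjars"));
      }
    }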
2024-11-23T17:08:58,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742127_1303 (size=5175431) 2024-11-23T17:08:58,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742127_1303 (size=5175431) 2024-11-23T17:08:58,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742128_1304 (size=232881) 2024-11-23T17:08:58,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742128_1304 (size=232881) 2024-11-23T17:08:58,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742128_1304 (size=232881) 2024-11-23T17:08:58,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742129_1305 (size=1323991) 2024-11-23T17:08:58,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742129_1305 (size=1323991) 2024-11-23T17:08:58,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742129_1305 (size=1323991) 2024-11-23T17:08:58,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742130_1306 (size=4695811) 2024-11-23T17:08:58,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742130_1306 (size=4695811) 2024-11-23T17:08:58,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742130_1306 (size=4695811) 2024-11-23T17:08:59,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742131_1307 (size=1877034) 2024-11-23T17:08:59,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742131_1307 (size=1877034) 2024-11-23T17:08:59,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742131_1307 (size=1877034) 2024-11-23T17:08:59,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742132_1308 (size=217555) 2024-11-23T17:08:59,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742132_1308 (size=217555) 2024-11-23T17:08:59,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742132_1308 (size=217555) 2024-11-23T17:08:59,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742133_1309 (size=4188619) 2024-11-23T17:08:59,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742133_1309 (size=4188619) 2024-11-23T17:08:59,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to 
blk_1073742133_1309 (size=4188619) 2024-11-23T17:08:59,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742134_1310 (size=127628) 2024-11-23T17:08:59,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742134_1310 (size=127628) 2024-11-23T17:08:59,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742134_1310 (size=127628) 2024-11-23T17:08:59,044 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-23T17:08:59,046 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-11-23T17:08:59,048 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=13.7 K 2024-11-23T17:08:59,048 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.7 K 2024-11-23T17:08:59,048 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=6.6 K 2024-11-23T17:08:59,048 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.3 K 2024-11-23T17:08:59,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742135_1311 (size=1023) 2024-11-23T17:08:59,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742135_1311 (size=1023) 2024-11-23T17:08:59,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742135_1311 (size=1023) 2024-11-23T17:08:59,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742136_1312 (size=35) 2024-11-23T17:08:59,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742136_1312 (size=35) 2024-11-23T17:08:59,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742136_1312 (size=35) 2024-11-23T17:08:59,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742137_1313 (size=304122) 2024-11-23T17:08:59,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742137_1313 (size=304122) 2024-11-23T17:08:59,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742137_1313 (size=304122) 2024-11-23T17:08:59,103 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T17:08:59,103 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-23T17:08:59,324 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0005_000001 (auth:SIMPLE) from 127.0.0.1:54142 2024-11-23T17:09:00,115 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:09:03,178 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T17:09:04,340 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-11-23T17:09:04,340 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer 2024-11-23T17:09:04,341 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-11-23T17:09:05,839 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0005_000001 (auth:SIMPLE) from 127.0.0.1:47176 2024-11-23T17:09:06,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742138_1314 (size=349820) 2024-11-23T17:09:06,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742138_1314 (size=349820) 2024-11-23T17:09:06,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742138_1314 (size=349820) 2024-11-23T17:09:08,067 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0005_000001 (auth:SIMPLE) from 127.0.0.1:41972 2024-11-23T17:09:08,067 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0005_000001 (auth:SIMPLE) from 127.0.0.1:46406 2024-11-23T17:09:08,923 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0005_000001 (auth:SIMPLE) from 127.0.0.1:41976 2024-11-23T17:09:08,924 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0005_000001 (auth:SIMPLE) from 127.0.0.1:46418 2024-11-23T17:09:09,844 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:09:11,487 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732381541272_0005_01_000006 while processing FINISH_CONTAINERS event 2024-11-23T17:09:13,581 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_2/usercache/jenkins/appcache/application_1732381541272_0005/container_1732381541272_0005_01_000002/launch_container.sh] 2024-11-23T17:09:13,581 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_2/usercache/jenkins/appcache/application_1732381541272_0005/container_1732381541272_0005_01_000002/container_tokens] 2024-11-23T17:09:13,581 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_2/usercache/jenkins/appcache/application_1732381541272_0005/container_1732381541272_0005_01_000002/sysfs] 2024-11-23T17:09:14,338 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_3/usercache/jenkins/appcache/application_1732381541272_0005/container_1732381541272_0005_01_000003/launch_container.sh] 2024-11-23T17:09:14,338 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_3/usercache/jenkins/appcache/application_1732381541272_0005/container_1732381541272_0005_01_000003/container_tokens] 2024-11-23T17:09:14,338 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_3/usercache/jenkins/appcache/application_1732381541272_0005/container_1732381541272_0005_01_000003/sysfs] 2024-11-23T17:09:14,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742139_1315 (size=31807) 2024-11-23T17:09:14,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742139_1315 (size=31807) 2024-11-23T17:09:14,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742139_1315 (size=31807) 2024-11-23T17:09:14,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742140_1316 (size=463) 2024-11-23T17:09:14,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742140_1316 (size=463) 2024-11-23T17:09:14,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742140_1316 (size=463) 2024-11-23T17:09:14,764 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): 
delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_3/usercache/jenkins/appcache/application_1732381541272_0005/container_1732381541272_0005_01_000004/launch_container.sh] 2024-11-23T17:09:14,764 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_3/usercache/jenkins/appcache/application_1732381541272_0005/container_1732381541272_0005_01_000004/container_tokens] 2024-11-23T17:09:14,764 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_3/usercache/jenkins/appcache/application_1732381541272_0005/container_1732381541272_0005_01_000004/sysfs] 2024-11-23T17:09:14,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742141_1317 (size=31807) 2024-11-23T17:09:14,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742141_1317 (size=31807) 2024-11-23T17:09:14,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742141_1317 (size=31807) 2024-11-23T17:09:14,778 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_1/usercache/jenkins/appcache/application_1732381541272_0005/container_1732381541272_0005_01_000005/launch_container.sh] 2024-11-23T17:09:14,779 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_1/usercache/jenkins/appcache/application_1732381541272_0005/container_1732381541272_0005_01_000005/container_tokens] 2024-11-23T17:09:14,779 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_1/usercache/jenkins/appcache/application_1732381541272_0005/container_1732381541272_0005_01_000005/sysfs] 2024-11-23T17:09:14,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742142_1318 (size=349820) 2024-11-23T17:09:14,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742142_1318 (size=349820) 2024-11-23T17:09:14,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742142_1318 (size=349820) 2024-11-23T17:09:14,809 INFO [Socket Reader #1 for port 0 
{}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0005_000001 (auth:SIMPLE) from 127.0.0.1:39462 2024-11-23T17:09:14,815 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0005_000001 (auth:SIMPLE) from 127.0.0.1:52368 2024-11-23T17:09:14,820 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0005_000001 (auth:SIMPLE) from 127.0.0.1:39478 2024-11-23T17:09:16,253 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-23T17:09:16,254 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-11-23T17:09:16,257 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-11-23T17:09:16,257 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-23T17:09:16,257 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-23T17:09:16,258 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1293158403_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-23T17:09:16,259 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-23T17:09:16,259 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-23T17:09:16,259 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@2b2fb4ec in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381737068/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381737068/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-23T17:09:16,259 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381737068/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-23T17:09:16,259 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381737068/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-23T17:09:16,261 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381737068, 
rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381737068, srcFsUri=hdfs://localhost:35431, srcDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:09:16,303 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:35431, inputRoot=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:09:16,303 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@2b2fb4ec, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381737068, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381737068/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-23T17:09:16,306 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-23T17:09:16,321 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381737068/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-23T17:09:16,345 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:09:16,345 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:09:16,345 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:09:17,421 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop-10525609479289896436.jar 2024-11-23T17:09:17,422 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:09:17,422 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:09:17,489 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop-14353587005516200884.jar 2024-11-23T17:09:17,490 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:09:17,490 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:09:17,490 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:09:17,491 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:09:17,491 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:09:17,491 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:09:17,491 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-23T17:09:17,491 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-23T17:09:17,491 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-23T17:09:17,492 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-23T17:09:17,492 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-23T17:09:17,492 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-23T17:09:17,492 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-23T17:09:17,492 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-23T17:09:17,493 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-23T17:09:17,493 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-23T17:09:17,493 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-23T17:09:17,493 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:09:17,494 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:09:17,494 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:09:17,494 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:09:17,494 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:09:17,494 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:09:17,494 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:09:17,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742143_1319 (size=24020) 2024-11-23T17:09:17,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742143_1319 (size=24020) 2024-11-23T17:09:17,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742143_1319 (size=24020) 2024-11-23T17:09:17,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742144_1320 (size=77755) 2024-11-23T17:09:17,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742144_1320 (size=77755) 2024-11-23T17:09:17,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742144_1320 (size=77755) 2024-11-23T17:09:17,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742145_1321 (size=6424742) 2024-11-23T17:09:17,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742145_1321 (size=6424742) 2024-11-23T17:09:17,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742145_1321 (size=6424742) 2024-11-23T17:09:17,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742146_1322 (size=131360) 2024-11-23T17:09:17,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742146_1322 (size=131360) 2024-11-23T17:09:17,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742146_1322 (size=131360) 2024-11-23T17:09:17,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742147_1323 (size=111793) 2024-11-23T17:09:17,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742147_1323 (size=111793) 2024-11-23T17:09:17,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742147_1323 (size=111793) 2024-11-23T17:09:17,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742148_1324 (size=1832290) 2024-11-23T17:09:17,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:38951 is added to blk_1073742148_1324 (size=1832290) 2024-11-23T17:09:17,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742148_1324 (size=1832290) 2024-11-23T17:09:17,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742149_1325 (size=8360005) 2024-11-23T17:09:17,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742149_1325 (size=8360005) 2024-11-23T17:09:17,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742149_1325 (size=8360005) 2024-11-23T17:09:17,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742150_1326 (size=503880) 2024-11-23T17:09:17,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742150_1326 (size=503880) 2024-11-23T17:09:17,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742150_1326 (size=503880) 2024-11-23T17:09:17,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742151_1327 (size=322274) 2024-11-23T17:09:17,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742151_1327 (size=322274) 2024-11-23T17:09:17,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742151_1327 (size=322274) 2024-11-23T17:09:17,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742152_1328 (size=20406) 2024-11-23T17:09:17,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742152_1328 (size=20406) 2024-11-23T17:09:17,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742152_1328 (size=20406) 2024-11-23T17:09:17,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742153_1329 (size=45609) 2024-11-23T17:09:17,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742153_1329 (size=45609) 2024-11-23T17:09:17,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742153_1329 (size=45609) 2024-11-23T17:09:17,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742154_1330 (size=136454) 2024-11-23T17:09:17,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742154_1330 (size=136454) 2024-11-23T17:09:17,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742154_1330 (size=136454) 2024-11-23T17:09:17,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38951 is added to blk_1073742155_1331 (size=1597270) 2024-11-23T17:09:17,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742155_1331 (size=1597270) 2024-11-23T17:09:17,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742155_1331 (size=1597270) 2024-11-23T17:09:17,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742156_1332 (size=30873) 2024-11-23T17:09:17,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742156_1332 (size=30873) 2024-11-23T17:09:17,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742156_1332 (size=30873) 2024-11-23T17:09:17,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742157_1333 (size=29229) 2024-11-23T17:09:17,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742157_1333 (size=29229) 2024-11-23T17:09:17,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742157_1333 (size=29229) 2024-11-23T17:09:17,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742158_1334 (size=903661) 2024-11-23T17:09:17,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742158_1334 (size=903661) 2024-11-23T17:09:17,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742158_1334 (size=903661) 2024-11-23T17:09:17,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742159_1335 (size=5175431) 2024-11-23T17:09:17,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742159_1335 (size=5175431) 2024-11-23T17:09:17,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742159_1335 (size=5175431) 2024-11-23T17:09:17,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742160_1336 (size=232881) 2024-11-23T17:09:17,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742160_1336 (size=232881) 2024-11-23T17:09:17,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742160_1336 (size=232881) 2024-11-23T17:09:17,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742161_1337 (size=1323991) 2024-11-23T17:09:17,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742161_1337 (size=1323991) 2024-11-23T17:09:17,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742161_1337 (size=1323991) 2024-11-23T17:09:17,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742162_1338 (size=4695811) 2024-11-23T17:09:17,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742162_1338 (size=4695811) 2024-11-23T17:09:17,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742162_1338 (size=4695811) 2024-11-23T17:09:17,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742163_1339 (size=1877034) 2024-11-23T17:09:17,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742163_1339 (size=1877034) 2024-11-23T17:09:17,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742163_1339 (size=1877034) 2024-11-23T17:09:17,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742164_1340 (size=217555) 2024-11-23T17:09:17,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742164_1340 (size=217555) 2024-11-23T17:09:17,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742164_1340 (size=217555) 2024-11-23T17:09:17,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742165_1341 (size=4188619) 2024-11-23T17:09:17,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742165_1341 (size=4188619) 2024-11-23T17:09:17,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742165_1341 (size=4188619) 2024-11-23T17:09:17,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742166_1342 (size=127628) 2024-11-23T17:09:17,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742166_1342 (size=127628) 2024-11-23T17:09:17,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742166_1342 (size=127628) 2024-11-23T17:09:17,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742167_1343 (size=440956) 2024-11-23T17:09:17,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742167_1343 (size=440956) 2024-11-23T17:09:17,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742167_1343 (size=440956) 2024-11-23T17:09:17,952 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
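[editor's note] The "For class ..., using jar ..." DEBUG lines above come from TableMapReduceUtil resolving, for each dependency class, the jar that contains it so the jar can be shipped with the MapReduce job; the addStoredBlock entries that follow are consistent with those jars being written into the job staging area on the mini DFS, ending with the JobResourceUploader warning about the missing job jar. A minimal client-side sketch of the call that triggers this resolution, assuming a plain MapReduce Job (the job name is illustrative only):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class AddDependencyJarsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "export-snapshot-sketch"); // illustrative job name
    // Resolves the containing jar for each HBase/Hadoop dependency class
    // and adds it to the job's distributed cache (tmpjars); this is the
    // resolution step that logs "For class ..., using jar ...".
    TableMapReduceUtil.addDependencyJars(job);
    // At submit time the resolved jars are copied to the staging directory,
    // which is what the block-report lines in the log correspond to.
    // job.submit();  // not submitted in this sketch
  }
}
```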
2024-11-23T17:09:17,954 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-11-23T17:09:17,957 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=13.7 K 2024-11-23T17:09:17,957 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.7 K 2024-11-23T17:09:17,957 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=6.6 K 2024-11-23T17:09:17,957 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.3 K 2024-11-23T17:09:17,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742168_1344 (size=1023) 2024-11-23T17:09:17,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742168_1344 (size=1023) 2024-11-23T17:09:17,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742168_1344 (size=1023) 2024-11-23T17:09:17,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742169_1345 (size=35) 2024-11-23T17:09:17,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742169_1345 (size=35) 2024-11-23T17:09:17,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742169_1345 (size=35) 2024-11-23T17:09:18,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742170_1346 (size=304128) 2024-11-23T17:09:18,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742170_1346 (size=304128) 2024-11-23T17:09:18,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742170_1346 (size=304128) 2024-11-23T17:09:20,891 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T17:09:20,891 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-23T17:09:20,894 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0005_000001 (auth:SIMPLE) from 127.0.0.1:47038 2024-11-23T17:09:20,910 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_2/usercache/jenkins/appcache/application_1732381541272_0005/container_1732381541272_0005_01_000001/launch_container.sh] 2024-11-23T17:09:20,911 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_2/usercache/jenkins/appcache/application_1732381541272_0005/container_1732381541272_0005_01_000001/container_tokens] 2024-11-23T17:09:20,911 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_2/usercache/jenkins/appcache/application_1732381541272_0005/container_1732381541272_0005_01_000001/sysfs] 2024-11-23T17:09:21,763 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0006_000001 (auth:SIMPLE) from 127.0.0.1:52130 2024-11-23T17:09:28,063 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0006_000001 (auth:SIMPLE) from 127.0.0.1:53024 2024-11-23T17:09:28,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742171_1347 (size=349826) 2024-11-23T17:09:28,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742171_1347 (size=349826) 2024-11-23T17:09:28,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742171_1347 (size=349826) 2024-11-23T17:09:30,313 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0006_000001 (auth:SIMPLE) from 127.0.0.1:47044 2024-11-23T17:09:30,313 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0006_000001 (auth:SIMPLE) from 127.0.0.1:52136 2024-11-23T17:09:31,147 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0006_000001 (auth:SIMPLE) from 127.0.0.1:46120 2024-11-23T17:09:31,148 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0006_000001 (auth:SIMPLE) from 127.0.0.1:35622 2024-11-23T17:09:33,178 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
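[editor's note] The ExportSnapshot(663)/(763) lines above show the tool loading the snapshot's hfile list and grouping the files into size-balanced input splits before the copy runs as a MapReduce job on the mini cluster. A hedged sketch of how such an export is typically driven, assuming ExportSnapshot can be run as a Hadoop Tool (the snapshot name is taken from the log; the destination URI is illustrative):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copies the named snapshot's manifest and hfiles to the target
    // filesystem; the tool plans one split per group of files, as in the
    // "export split=N size=..." lines above.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testConsecutiveExports",
        "-copy-to", "file:///tmp/local-export"   // illustrative destination
    });
    System.exit(rc);
  }
}
```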
2024-11-23T17:09:33,896 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732381541272_0006_01_000006 while processing FINISH_CONTAINERS event 2024-11-23T17:09:35,758 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_3/usercache/jenkins/appcache/application_1732381541272_0006/container_1732381541272_0006_01_000002/launch_container.sh] 2024-11-23T17:09:35,758 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_3/usercache/jenkins/appcache/application_1732381541272_0006/container_1732381541272_0006_01_000002/container_tokens] 2024-11-23T17:09:35,758 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_3/usercache/jenkins/appcache/application_1732381541272_0006/container_1732381541272_0006_01_000002/sysfs] 2024-11-23T17:09:37,224 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_3/usercache/jenkins/appcache/application_1732381541272_0006/container_1732381541272_0006_01_000005/launch_container.sh] 2024-11-23T17:09:37,224 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_3/usercache/jenkins/appcache/application_1732381541272_0006/container_1732381541272_0006_01_000005/container_tokens] 2024-11-23T17:09:37,224 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_3/usercache/jenkins/appcache/application_1732381541272_0006/container_1732381541272_0006_01_000005/sysfs] 2024-11-23T17:09:37,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742172_1348 (size=29748) 2024-11-23T17:09:37,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742172_1348 (size=29748) 2024-11-23T17:09:37,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742172_1348 (size=29748) 2024-11-23T17:09:37,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742173_1349 (size=463) 2024-11-23T17:09:37,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:38951 is added to blk_1073742173_1349 (size=463) 2024-11-23T17:09:37,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742173_1349 (size=463) 2024-11-23T17:09:37,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742174_1350 (size=29748) 2024-11-23T17:09:37,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742174_1350 (size=29748) 2024-11-23T17:09:37,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742174_1350 (size=29748) 2024-11-23T17:09:37,327 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_3/usercache/jenkins/appcache/application_1732381541272_0006/container_1732381541272_0006_01_000004/launch_container.sh] 2024-11-23T17:09:37,327 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_3/usercache/jenkins/appcache/application_1732381541272_0006/container_1732381541272_0006_01_000004/container_tokens] 2024-11-23T17:09:37,327 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_3/usercache/jenkins/appcache/application_1732381541272_0006/container_1732381541272_0006_01_000004/sysfs] 2024-11-23T17:09:37,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742175_1351 (size=349826) 2024-11-23T17:09:37,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742175_1351 (size=349826) 2024-11-23T17:09:37,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742175_1351 (size=349826) 2024-11-23T17:09:37,356 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0006_000001 (auth:SIMPLE) from 127.0.0.1:46130 2024-11-23T17:09:37,363 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0006_000001 (auth:SIMPLE) from 127.0.0.1:35636 2024-11-23T17:09:38,407 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-23T17:09:38,408 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
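[editor's note] After the export is finalized and verified, the test walks both the source HDFS snapshot directory and the exported local copy and logs every file it finds (the TestExportSnapshot(495)/(500) lines that follow). A generic recursive listing with the Hadoop FileSystem API looks roughly like this; the path shown is illustrative, not the exact test-data path from the log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListSnapshotFilesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Illustrative root; the test uses its own .hbase-snapshot directories.
    Path root = new Path("hdfs://localhost:35431/user/jenkins/.hbase-snapshot/snaptb0-testConsecutiveExports");
    FileSystem fs = FileSystem.get(root.toUri(), conf);
    // Recursively enumerate every file under the snapshot directory,
    // mirroring the per-file DEBUG lines emitted by the test.
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(root, true);
    while (it.hasNext()) {
      System.out.println(it.next().getPath());
    }
  }
}
```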
2024-11-23T17:09:38,415 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-11-23T17:09:38,415 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-23T17:09:38,415 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-23T17:09:38,415 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1293158403_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-23T17:09:38,416 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-23T17:09:38,416 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-23T17:09:38,416 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@2b2fb4ec in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381737068/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381737068/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-23T17:09:38,417 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381737068/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-23T17:09:38,417 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381737068/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-23T17:09:38,439 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testConsecutiveExports 2024-11-23T17:09:38,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=125, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports 2024-11-23T17:09:38,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-11-23T17:09:38,445 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381778445"}]},"ts":"1732381778445"} 2024-11-23T17:09:38,448 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLING in hbase:meta 2024-11-23T17:09:38,448 INFO [PEWorker-4 {}] 
procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-11-23T17:09:38,449 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-11-23T17:09:38,451 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ff9440d2c018d6341465fa04c5e3e803, UNASSIGN}, {pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=f33c28667536ddadb93b5f1f7ffa6b91, UNASSIGN}] 2024-11-23T17:09:38,453 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=f33c28667536ddadb93b5f1f7ffa6b91, UNASSIGN 2024-11-23T17:09:38,453 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ff9440d2c018d6341465fa04c5e3e803, UNASSIGN 2024-11-23T17:09:38,454 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=128 updating hbase:meta row=f33c28667536ddadb93b5f1f7ffa6b91, regionState=CLOSING, regionLocation=1a7c5470b472,43453,1732381534825 2024-11-23T17:09:38,454 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=ff9440d2c018d6341465fa04c5e3e803, regionState=CLOSING, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:09:38,456 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ff9440d2c018d6341465fa04c5e3e803, UNASSIGN because future has completed 2024-11-23T17:09:38,456 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:09:38,456 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=129, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure ff9440d2c018d6341465fa04c5e3e803, server=1a7c5470b472,37875,1732381534675}] 2024-11-23T17:09:38,458 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=f33c28667536ddadb93b5f1f7ffa6b91, UNASSIGN because future has completed 2024-11-23T17:09:38,458 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:09:38,458 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=130, ppid=128, state=RUNNABLE, hasLock=false; CloseRegionProcedure f33c28667536ddadb93b5f1f7ffa6b91, server=1a7c5470b472,43453,1732381534825}] 2024-11-23T17:09:38,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-11-23T17:09:38,611 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(122): Close ff9440d2c018d6341465fa04c5e3e803 2024-11-23T17:09:38,612 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:09:38,612 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1722): Closing ff9440d2c018d6341465fa04c5e3e803, disabling compactions & flushes 2024-11-23T17:09:38,612 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1732381734900.ff9440d2c018d6341465fa04c5e3e803. 2024-11-23T17:09:38,611 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(122): Close f33c28667536ddadb93b5f1f7ffa6b91 2024-11-23T17:09:38,612 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:09:38,612 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1722): Closing f33c28667536ddadb93b5f1f7ffa6b91, disabling compactions & flushes 2024-11-23T17:09:38,612 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1732381734900.f33c28667536ddadb93b5f1f7ffa6b91. 2024-11-23T17:09:38,612 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1732381734900.f33c28667536ddadb93b5f1f7ffa6b91. 2024-11-23T17:09:38,612 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1732381734900.f33c28667536ddadb93b5f1f7ffa6b91. after waiting 0 ms 2024-11-23T17:09:38,612 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1732381734900.f33c28667536ddadb93b5f1f7ffa6b91. 2024-11-23T17:09:38,613 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1732381734900.ff9440d2c018d6341465fa04c5e3e803. 2024-11-23T17:09:38,613 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1732381734900.ff9440d2c018d6341465fa04c5e3e803. after waiting 0 ms 2024-11-23T17:09:38,613 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1732381734900.ff9440d2c018d6341465fa04c5e3e803. 
2024-11-23T17:09:38,620 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/ff9440d2c018d6341465fa04c5e3e803/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:09:38,621 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:09:38,621 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1732381734900.ff9440d2c018d6341465fa04c5e3e803. 2024-11-23T17:09:38,621 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/f33c28667536ddadb93b5f1f7ffa6b91/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:09:38,621 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1676): Region close journal for ff9440d2c018d6341465fa04c5e3e803: Waiting for close lock at 1732381778612Running coprocessor pre-close hooks at 1732381778612Disabling compacts and flushes for region at 1732381778612Disabling writes for close at 1732381778613 (+1 ms)Writing region close event to WAL at 1732381778614 (+1 ms)Running coprocessor post-close hooks at 1732381778621 (+7 ms)Closed at 1732381778621 2024-11-23T17:09:38,622 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:09:38,622 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1732381734900.f33c28667536ddadb93b5f1f7ffa6b91. 
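[editor's note] The pid=125-130 chain above (DisableTableProcedure → CloseTableRegionsProcedure → per-region TransitRegionStateProcedure/CloseRegionProcedure) is the master-side expansion of a single client request to disable the table; each region server then closes its region, writes the recovered.edits seqid file, and records the close journal seen here. A minimal client-side sketch, assuming a running cluster reachable through the default configuration:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testConsecutiveExports");
      // Blocks until the DisableTableProcedure and its region-close
      // subprocedures finish, i.e. until the table state is DISABLED.
      admin.disableTable(table);
    }
  }
}
```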
2024-11-23T17:09:38,622 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1676): Region close journal for f33c28667536ddadb93b5f1f7ffa6b91: Waiting for close lock at 1732381778612Running coprocessor pre-close hooks at 1732381778612Disabling compacts and flushes for region at 1732381778612Disabling writes for close at 1732381778612Writing region close event to WAL at 1732381778615 (+3 ms)Running coprocessor post-close hooks at 1732381778622 (+7 ms)Closed at 1732381778622 2024-11-23T17:09:38,624 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(157): Closed ff9440d2c018d6341465fa04c5e3e803 2024-11-23T17:09:38,624 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=ff9440d2c018d6341465fa04c5e3e803, regionState=CLOSED 2024-11-23T17:09:38,626 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=128 updating hbase:meta row=f33c28667536ddadb93b5f1f7ffa6b91, regionState=CLOSED 2024-11-23T17:09:38,627 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(157): Closed f33c28667536ddadb93b5f1f7ffa6b91 2024-11-23T17:09:38,629 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=129, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure ff9440d2c018d6341465fa04c5e3e803, server=1a7c5470b472,37875,1732381534675 because future has completed 2024-11-23T17:09:38,630 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=130, ppid=128, state=RUNNABLE, hasLock=false; CloseRegionProcedure f33c28667536ddadb93b5f1f7ffa6b91, server=1a7c5470b472,43453,1732381534825 because future has completed 2024-11-23T17:09:38,633 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=129, resume processing ppid=127 2024-11-23T17:09:38,633 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=129, ppid=127, state=SUCCESS, hasLock=false; CloseRegionProcedure ff9440d2c018d6341465fa04c5e3e803, server=1a7c5470b472,37875,1732381534675 in 174 msec 2024-11-23T17:09:38,634 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=130, resume processing ppid=128 2024-11-23T17:09:38,634 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=130, ppid=128, state=SUCCESS, hasLock=false; CloseRegionProcedure f33c28667536ddadb93b5f1f7ffa6b91, server=1a7c5470b472,43453,1732381534825 in 173 msec 2024-11-23T17:09:38,636 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=127, ppid=126, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=ff9440d2c018d6341465fa04c5e3e803, UNASSIGN in 182 msec 2024-11-23T17:09:38,637 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=128, resume processing ppid=126 2024-11-23T17:09:38,637 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=128, ppid=126, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=f33c28667536ddadb93b5f1f7ffa6b91, UNASSIGN in 183 msec 2024-11-23T17:09:38,640 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=126, resume processing ppid=125 2024-11-23T17:09:38,640 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): 
Finished pid=126, ppid=125, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 189 msec 2024-11-23T17:09:38,642 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381778641"}]},"ts":"1732381778641"} 2024-11-23T17:09:38,644 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-11-23T17:09:38,644 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 2024-11-23T17:09:38,646 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=125, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports in 204 msec 2024-11-23T17:09:38,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-11-23T17:09:38,759 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-23T17:09:38,759 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testConsecutiveExports 2024-11-23T17:09:38,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=131, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-23T17:09:38,761 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=131, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-23T17:09:38,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testConsecutiveExports 2024-11-23T17:09:38,763 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=131, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-23T17:09:38,766 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37875 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports 2024-11-23T17:09:38,768 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/ff9440d2c018d6341465fa04c5e3e803 2024-11-23T17:09:38,768 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/f33c28667536ddadb93b5f1f7ffa6b91 2024-11-23T17:09:38,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-23T17:09:38,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, 
state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-23T17:09:38,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-23T17:09:38,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-23T17:09:38,771 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-23T17:09:38,771 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-23T17:09:38,771 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-23T17:09:38,771 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-23T17:09:38,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-23T17:09:38,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:09:38,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-23T17:09:38,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:09:38,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-23T17:09:38,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:09:38,773 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-23T17:09:38,773 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:09:38,773 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-23T17:09:38,774 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/f33c28667536ddadb93b5f1f7ffa6b91/cf, FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/f33c28667536ddadb93b5f1f7ffa6b91/recovered.edits] 2024-11-23T17:09:38,776 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/ff9440d2c018d6341465fa04c5e3e803/cf, FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/ff9440d2c018d6341465fa04c5e3e803/recovered.edits] 2024-11-23T17:09:38,782 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/f33c28667536ddadb93b5f1f7ffa6b91/cf/0eb405eac3da4b37acc39065c648d2e4 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testConsecutiveExports/f33c28667536ddadb93b5f1f7ffa6b91/cf/0eb405eac3da4b37acc39065c648d2e4 2024-11-23T17:09:38,783 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/ff9440d2c018d6341465fa04c5e3e803/cf/2c60132add8443f2ac5db06c4c1b63e5 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testConsecutiveExports/ff9440d2c018d6341465fa04c5e3e803/cf/2c60132add8443f2ac5db06c4c1b63e5 2024-11-23T17:09:38,786 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/f33c28667536ddadb93b5f1f7ffa6b91/recovered.edits/9.seqid to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testConsecutiveExports/f33c28667536ddadb93b5f1f7ffa6b91/recovered.edits/9.seqid 2024-11-23T17:09:38,787 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/f33c28667536ddadb93b5f1f7ffa6b91 2024-11-23T17:09:38,788 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/ff9440d2c018d6341465fa04c5e3e803/recovered.edits/9.seqid to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testConsecutiveExports/ff9440d2c018d6341465fa04c5e3e803/recovered.edits/9.seqid 2024-11-23T17:09:38,789 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testConsecutiveExports/ff9440d2c018d6341465fa04c5e3e803 2024-11-23T17:09:38,789 
DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions 2024-11-23T17:09:38,789 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac 2024-11-23T17:09:38,790 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf] 2024-11-23T17:09:38,794 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b202411233b171caece614998b092eedbb8575631_f33c28667536ddadb93b5f1f7ffa6b91 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b202411233b171caece614998b092eedbb8575631_f33c28667536ddadb93b5f1f7ffa6b91 2024-11-23T17:09:38,795 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202411236fb0a244f3a64dbd9ae42eadd6503c6b_ff9440d2c018d6341465fa04c5e3e803 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202411236fb0a244f3a64dbd9ae42eadd6503c6b_ff9440d2c018d6341465fa04c5e3e803 2024-11-23T17:09:38,796 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac 2024-11-23T17:09:38,799 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=131, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-23T17:09:38,802 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 2024-11-23T17:09:38,805 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testConsecutiveExports' descriptor. 2024-11-23T17:09:38,807 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=131, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-23T17:09:38,807 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testConsecutiveExports' from region states. 
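[editor's note] With the table disabled, deleting it runs the DeleteTableProcedure shown here: region and MOB directories are moved under the archive tree by HFileArchiver, the ACL znodes are removed, and the region rows and table state are deleted from hbase:meta; the test then drops its snapshots as well (the delete-snapshot calls just below). The client-side counterpart, sketched under the same assumptions as the disable example above:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableAndSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testConsecutiveExports");
      // The table must already be disabled; its data files are archived
      // under the /archive tree rather than deleted outright.
      admin.deleteTable(table);
      // Remove the snapshots taken earlier in the test.
      admin.deleteSnapshot("emptySnaptb0-testConsecutiveExports");
      admin.deleteSnapshot("snaptb0-testConsecutiveExports");
    }
  }
}
```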
2024-11-23T17:09:38,807 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1732381734900.ff9440d2c018d6341465fa04c5e3e803.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732381778807"}]},"ts":"9223372036854775807"} 2024-11-23T17:09:38,807 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1732381734900.f33c28667536ddadb93b5f1f7ffa6b91.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732381778807"}]},"ts":"9223372036854775807"} 2024-11-23T17:09:38,810 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-23T17:09:38,810 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => ff9440d2c018d6341465fa04c5e3e803, NAME => 'testtb-testConsecutiveExports,,1732381734900.ff9440d2c018d6341465fa04c5e3e803.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => f33c28667536ddadb93b5f1f7ffa6b91, NAME => 'testtb-testConsecutiveExports,1,1732381734900.f33c28667536ddadb93b5f1f7ffa6b91.', STARTKEY => '1', ENDKEY => ''}] 2024-11-23T17:09:38,810 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testConsecutiveExports' as deleted. 2024-11-23T17:09:38,810 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732381778810"}]},"ts":"9223372036854775807"} 2024-11-23T17:09:38,815 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testConsecutiveExports state from META 2024-11-23T17:09:38,816 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=131, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-23T17:09:38,818 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=131, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports in 57 msec 2024-11-23T17:09:38,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-23T17:09:38,879 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testConsecutiveExports 2024-11-23T17:09:38,879 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-23T17:09:38,887 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testConsecutiveExports" type: DISABLED 2024-11-23T17:09:38,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-11-23T17:09:38,892 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testConsecutiveExports" type: DISABLED 2024-11-23T17:09:38,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testConsecutiveExports 2024-11-23T17:09:38,923 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testConsecutiveExports Thread=805 (was 
807), OpenFileDescriptor=802 (was 812), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=732 (was 685) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 17), AvailableMemoryMB=2790 (was 3196) 2024-11-23T17:09:38,923 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=805 is superior to 500 2024-11-23T17:09:38,949 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=805, OpenFileDescriptor=802, MaxFileDescriptor=1048576, SystemLoadAverage=732, ProcessCount=17, AvailableMemoryMB=2786 2024-11-23T17:09:38,949 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=805 is superior to 500 2024-11-23T17:09:38,951 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T17:09:38,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=132, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:09:38,953 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T17:09:38,954 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 132 2024-11-23T17:09:38,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-11-23T17:09:38,955 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T17:09:38,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742176_1352 (size=458) 2024-11-23T17:09:38,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742176_1352 (size=458) 2024-11-23T17:09:38,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742176_1352 (size=458) 2024-11-23T17:09:38,977 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c6d43d02a62db34e972bce100a1c993f, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1732381778950.c6d43d02a62db34e972bce100a1c993f.', STARTKEY => '', ENDKEY => '1'}, 
tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:09:38,982 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 19dbb7b062c887aed1dc9076d6db561a, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1732381778950.19dbb7b062c887aed1dc9076d6db561a.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:09:39,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742177_1353 (size=83) 2024-11-23T17:09:39,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742177_1353 (size=83) 2024-11-23T17:09:39,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742177_1353 (size=83) 2024-11-23T17:09:39,003 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1732381778950.c6d43d02a62db34e972bce100a1c993f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:09:39,003 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1722): Closing c6d43d02a62db34e972bce100a1c993f, disabling compactions & flushes 2024-11-23T17:09:39,004 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1732381778950.c6d43d02a62db34e972bce100a1c993f. 2024-11-23T17:09:39,004 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1732381778950.c6d43d02a62db34e972bce100a1c993f. 2024-11-23T17:09:39,004 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1732381778950.c6d43d02a62db34e972bce100a1c993f. 
after waiting 0 ms 2024-11-23T17:09:39,004 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1732381778950.c6d43d02a62db34e972bce100a1c993f. 2024-11-23T17:09:39,004 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1732381778950.c6d43d02a62db34e972bce100a1c993f. 2024-11-23T17:09:39,004 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for c6d43d02a62db34e972bce100a1c993f: Waiting for close lock at 1732381779003Disabling compacts and flushes for region at 1732381779003Disabling writes for close at 1732381779004 (+1 ms)Writing region close event to WAL at 1732381779004Closed at 1732381779004 2024-11-23T17:09:39,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742178_1354 (size=83) 2024-11-23T17:09:39,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742178_1354 (size=83) 2024-11-23T17:09:39,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742178_1354 (size=83) 2024-11-23T17:09:39,012 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1732381778950.19dbb7b062c887aed1dc9076d6db561a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:09:39,012 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1722): Closing 19dbb7b062c887aed1dc9076d6db561a, disabling compactions & flushes 2024-11-23T17:09:39,012 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1732381778950.19dbb7b062c887aed1dc9076d6db561a. 2024-11-23T17:09:39,012 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1732381778950.19dbb7b062c887aed1dc9076d6db561a. 2024-11-23T17:09:39,012 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1732381778950.19dbb7b062c887aed1dc9076d6db561a. after waiting 0 ms 2024-11-23T17:09:39,012 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1732381778950.19dbb7b062c887aed1dc9076d6db561a. 2024-11-23T17:09:39,012 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1732381778950.19dbb7b062c887aed1dc9076d6db561a. 
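Aside: the CREATE request at the start of this test (a single 'cf' family with IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1', and regions split at key '1') corresponds roughly to the client-side sketch below. This is an illustration only, assuming a site configuration on the classpath that points at the cluster; the class name and connection handling are assumptions, not taken from the test source.

    // Hypothetical sketch: create a MOB-enabled table like the one logged above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml for the target cluster
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
          TableDescriptor desc = TableDescriptorBuilder.newBuilder(tn)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMobEnabled(true)   // IS_MOB => 'true'
                  .setMobThreshold(0L)   // MOB_THRESHOLD => '0'
                  .setMaxVersions(1)     // VERSIONS => '1'
                  .build())
              .build();
          // One explicit split key '1' yields the two regions ('' -> '1' and '1' -> '') created above.
          admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }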
2024-11-23T17:09:39,012 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for 19dbb7b062c887aed1dc9076d6db561a: Waiting for close lock at 1732381779012Disabling compacts and flushes for region at 1732381779012Disabling writes for close at 1732381779012Writing region close event to WAL at 1732381779012Closed at 1732381779012 2024-11-23T17:09:39,013 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T17:09:39,014 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1732381778950.c6d43d02a62db34e972bce100a1c993f.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1732381779013"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732381779013"}]},"ts":"1732381779013"} 2024-11-23T17:09:39,014 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1732381778950.19dbb7b062c887aed1dc9076d6db561a.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1732381779013"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732381779013"}]},"ts":"1732381779013"} 2024-11-23T17:09:39,017 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-23T17:09:39,021 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T17:09:39,021 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381779021"}]},"ts":"1732381779021"} 2024-11-23T17:09:39,023 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-11-23T17:09:39,023 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {1a7c5470b472=0} racks are {/default-rack=0} 2024-11-23T17:09:39,025 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T17:09:39,025 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T17:09:39,025 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T17:09:39,025 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T17:09:39,025 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T17:09:39,025 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T17:09:39,025 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T17:09:39,025 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T17:09:39,025 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T17:09:39,025 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T17:09:39,026 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=c6d43d02a62db34e972bce100a1c993f, ASSIGN}, {pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=19dbb7b062c887aed1dc9076d6db561a, ASSIGN}] 2024-11-23T17:09:39,027 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=19dbb7b062c887aed1dc9076d6db561a, ASSIGN 2024-11-23T17:09:39,028 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=c6d43d02a62db34e972bce100a1c993f, ASSIGN 2024-11-23T17:09:39,029 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=19dbb7b062c887aed1dc9076d6db561a, ASSIGN; state=OFFLINE, location=1a7c5470b472,44711,1732381534872; forceNewPlan=false, retain=false 2024-11-23T17:09:39,029 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=c6d43d02a62db34e972bce100a1c993f, ASSIGN; state=OFFLINE, location=1a7c5470b472,43453,1732381534825; forceNewPlan=false, retain=false 2024-11-23T17:09:39,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-11-23T17:09:39,180 INFO [1a7c5470b472:46159 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-23T17:09:39,180 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=134 updating hbase:meta row=19dbb7b062c887aed1dc9076d6db561a, regionState=OPENING, regionLocation=1a7c5470b472,44711,1732381534872 2024-11-23T17:09:39,180 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=c6d43d02a62db34e972bce100a1c993f, regionState=OPENING, regionLocation=1a7c5470b472,43453,1732381534825 2024-11-23T17:09:39,183 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=19dbb7b062c887aed1dc9076d6db561a, ASSIGN because future has completed 2024-11-23T17:09:39,183 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE, hasLock=false; OpenRegionProcedure 19dbb7b062c887aed1dc9076d6db561a, server=1a7c5470b472,44711,1732381534872}] 2024-11-23T17:09:39,184 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=c6d43d02a62db34e972bce100a1c993f, ASSIGN because future has completed 2024-11-23T17:09:39,184 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=136, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure c6d43d02a62db34e972bce100a1c993f, server=1a7c5470b472,43453,1732381534825}] 2024-11-23T17:09:39,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-11-23T17:09:39,339 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,1,1732381778950.19dbb7b062c887aed1dc9076d6db561a. 2024-11-23T17:09:39,339 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7752): Opening region: {ENCODED => 19dbb7b062c887aed1dc9076d6db561a, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1732381778950.19dbb7b062c887aed1dc9076d6db561a.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T17:09:39,339 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1732381778950.19dbb7b062c887aed1dc9076d6db561a. service=AccessControlService 2024-11-23T17:09:39,339 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T17:09:39,340 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 19dbb7b062c887aed1dc9076d6db561a 2024-11-23T17:09:39,340 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1732381778950.19dbb7b062c887aed1dc9076d6db561a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:09:39,340 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7794): checking encryption for 19dbb7b062c887aed1dc9076d6db561a 2024-11-23T17:09:39,340 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7797): checking classloading for 19dbb7b062c887aed1dc9076d6db561a 2024-11-23T17:09:39,340 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,,1732381778950.c6d43d02a62db34e972bce100a1c993f. 2024-11-23T17:09:39,341 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7752): Opening region: {ENCODED => c6d43d02a62db34e972bce100a1c993f, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1732381778950.c6d43d02a62db34e972bce100a1c993f.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T17:09:39,341 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1732381778950.c6d43d02a62db34e972bce100a1c993f. service=AccessControlService 2024-11-23T17:09:39,341 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T17:09:39,341 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion c6d43d02a62db34e972bce100a1c993f 2024-11-23T17:09:39,341 INFO [StoreOpener-19dbb7b062c887aed1dc9076d6db561a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 19dbb7b062c887aed1dc9076d6db561a 2024-11-23T17:09:39,341 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1732381778950.c6d43d02a62db34e972bce100a1c993f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:09:39,341 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7794): checking encryption for c6d43d02a62db34e972bce100a1c993f 2024-11-23T17:09:39,342 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7797): checking classloading for c6d43d02a62db34e972bce100a1c993f 2024-11-23T17:09:39,343 INFO [StoreOpener-19dbb7b062c887aed1dc9076d6db561a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 19dbb7b062c887aed1dc9076d6db561a columnFamilyName cf 2024-11-23T17:09:39,343 INFO [StoreOpener-c6d43d02a62db34e972bce100a1c993f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c6d43d02a62db34e972bce100a1c993f 2024-11-23T17:09:39,344 DEBUG [StoreOpener-19dbb7b062c887aed1dc9076d6db561a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:09:39,344 INFO [StoreOpener-c6d43d02a62db34e972bce100a1c993f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c6d43d02a62db34e972bce100a1c993f columnFamilyName cf 2024-11-23T17:09:39,344 INFO [StoreOpener-19dbb7b062c887aed1dc9076d6db561a-1 {}] regionserver.HStore(327): Store=19dbb7b062c887aed1dc9076d6db561a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:09:39,345 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1038): replaying wal for 19dbb7b062c887aed1dc9076d6db561a 2024-11-23T17:09:39,345 DEBUG [StoreOpener-c6d43d02a62db34e972bce100a1c993f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:09:39,346 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/19dbb7b062c887aed1dc9076d6db561a 2024-11-23T17:09:39,346 INFO [StoreOpener-c6d43d02a62db34e972bce100a1c993f-1 {}] regionserver.HStore(327): Store=c6d43d02a62db34e972bce100a1c993f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:09:39,346 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/19dbb7b062c887aed1dc9076d6db561a 2024-11-23T17:09:39,346 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1038): replaying wal for c6d43d02a62db34e972bce100a1c993f 2024-11-23T17:09:39,346 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1048): stopping wal replay for 19dbb7b062c887aed1dc9076d6db561a 2024-11-23T17:09:39,347 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1060): Cleaning up temporary data for 19dbb7b062c887aed1dc9076d6db561a 2024-11-23T17:09:39,347 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/c6d43d02a62db34e972bce100a1c993f 2024-11-23T17:09:39,347 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/c6d43d02a62db34e972bce100a1c993f 2024-11-23T17:09:39,348 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1048): stopping wal replay for c6d43d02a62db34e972bce100a1c993f 2024-11-23T17:09:39,348 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1060): 
Cleaning up temporary data for c6d43d02a62db34e972bce100a1c993f 2024-11-23T17:09:39,348 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1093): writing seq id for 19dbb7b062c887aed1dc9076d6db561a 2024-11-23T17:09:39,350 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1093): writing seq id for c6d43d02a62db34e972bce100a1c993f 2024-11-23T17:09:39,350 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/19dbb7b062c887aed1dc9076d6db561a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:09:39,351 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1114): Opened 19dbb7b062c887aed1dc9076d6db561a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64411134, jitterRate=-0.04019930958747864}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:09:39,351 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 19dbb7b062c887aed1dc9076d6db561a 2024-11-23T17:09:39,352 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1006): Region open journal for 19dbb7b062c887aed1dc9076d6db561a: Running coprocessor pre-open hook at 1732381779340Writing region info on filesystem at 1732381779340Initializing all the Stores at 1732381779341 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381779341Cleaning up temporary data from old regions at 1732381779347 (+6 ms)Running coprocessor post-open hooks at 1732381779351 (+4 ms)Region opened successfully at 1732381779351 2024-11-23T17:09:39,352 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/c6d43d02a62db34e972bce100a1c993f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:09:39,352 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1732381778950.19dbb7b062c887aed1dc9076d6db561a., pid=135, masterSystemTime=1732381779335 2024-11-23T17:09:39,352 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1114): Opened c6d43d02a62db34e972bce100a1c993f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62866345, jitterRate=-0.06321845948696136}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 
2024-11-23T17:09:39,353 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c6d43d02a62db34e972bce100a1c993f 2024-11-23T17:09:39,353 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1006): Region open journal for c6d43d02a62db34e972bce100a1c993f: Running coprocessor pre-open hook at 1732381779342Writing region info on filesystem at 1732381779342Initializing all the Stores at 1732381779342Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381779342Cleaning up temporary data from old regions at 1732381779348 (+6 ms)Running coprocessor post-open hooks at 1732381779353 (+5 ms)Region opened successfully at 1732381779353 2024-11-23T17:09:39,354 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1732381778950.c6d43d02a62db34e972bce100a1c993f., pid=136, masterSystemTime=1732381779338 2024-11-23T17:09:39,355 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1732381778950.19dbb7b062c887aed1dc9076d6db561a. 2024-11-23T17:09:39,355 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1732381778950.19dbb7b062c887aed1dc9076d6db561a. 2024-11-23T17:09:39,356 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=134 updating hbase:meta row=19dbb7b062c887aed1dc9076d6db561a, regionState=OPEN, openSeqNum=2, regionLocation=1a7c5470b472,44711,1732381534872 2024-11-23T17:09:39,357 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1732381778950.c6d43d02a62db34e972bce100a1c993f. 2024-11-23T17:09:39,357 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,,1732381778950.c6d43d02a62db34e972bce100a1c993f. 
2024-11-23T17:09:39,357 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=c6d43d02a62db34e972bce100a1c993f, regionState=OPEN, openSeqNum=2, regionLocation=1a7c5470b472,43453,1732381534825 2024-11-23T17:09:39,359 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=135, ppid=134, state=RUNNABLE, hasLock=false; OpenRegionProcedure 19dbb7b062c887aed1dc9076d6db561a, server=1a7c5470b472,44711,1732381534872 because future has completed 2024-11-23T17:09:39,360 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=136, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure c6d43d02a62db34e972bce100a1c993f, server=1a7c5470b472,43453,1732381534825 because future has completed 2024-11-23T17:09:39,362 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=135, resume processing ppid=134 2024-11-23T17:09:39,363 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=135, ppid=134, state=SUCCESS, hasLock=false; OpenRegionProcedure 19dbb7b062c887aed1dc9076d6db561a, server=1a7c5470b472,44711,1732381534872 in 177 msec 2024-11-23T17:09:39,364 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=136, resume processing ppid=133 2024-11-23T17:09:39,364 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=134, ppid=132, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=19dbb7b062c887aed1dc9076d6db561a, ASSIGN in 337 msec 2024-11-23T17:09:39,364 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=136, ppid=133, state=SUCCESS, hasLock=false; OpenRegionProcedure c6d43d02a62db34e972bce100a1c993f, server=1a7c5470b472,43453,1732381534825 in 178 msec 2024-11-23T17:09:39,366 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=133, resume processing ppid=132 2024-11-23T17:09:39,366 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=133, ppid=132, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=c6d43d02a62db34e972bce100a1c993f, ASSIGN in 338 msec 2024-11-23T17:09:39,367 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T17:09:39,367 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381779367"}]},"ts":"1732381779367"} 2024-11-23T17:09:39,369 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-11-23T17:09:39,370 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T17:09:39,371 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 2024-11-23T17:09:39,374 DEBUG 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37875 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-23T17:09:39,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:09:39,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:09:39,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:09:39,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:09:39,385 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:09:39,385 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:09:39,386 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:09:39,386 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:09:39,387 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=132, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 434 msec 2024-11-23T17:09:39,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-11-23T17:09:39,593 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-23T17:09:39,593 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-23T17:09:39,597 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 
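The META scans around this point ("Found 2 regions for table ..." just above, and the repeated "Scanning META starting at row=..." entries that follow) are the test utility confirming both regions came online. From a plain client, the same check could look roughly like the sketch below; the listRegions method name and the passed-in Connection are assumptions for illustration.

    // Hypothetical sketch: list the region locations of the new table via a client connection.
    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public final class RegionCheckSketch {
      static void listRegions(Connection conn) throws IOException {
        TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
        try (RegionLocator locator = conn.getRegionLocator(tn)) {
          List<HRegionLocation> locations = locator.getAllRegionLocations();
          // Two locations are expected here: ['', '1') and ['1', '').
          for (HRegionLocation loc : locations) {
            System.out.println(loc.getRegion().getRegionNameAsString() + " on " + loc.getServerName());
          }
        }
      }
    }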
2024-11-23T17:09:39,597 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1732381778950.c6d43d02a62db34e972bce100a1c993f. 2024-11-23T17:09:39,597 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:09:39,599 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-23T17:09:39,604 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-23T17:09:39,610 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-23T17:09:39,613 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-23T17:09:39,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732381779613 (current time:1732381779613). 2024-11-23T17:09:39,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:09:39,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-11-23T17:09:39,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:09:39,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70e16a37, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:09:39,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:09:39,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:09:39,615 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:09:39,615 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:09:39,616 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: 
"67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:09:39,616 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5fecdafb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:09:39,616 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:09:39,616 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:09:39,616 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:09:39,617 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48466, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:09:39,618 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@314f4a6d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:09:39,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:09:39,619 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:09:39,619 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:09:39,620 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36996, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:09:39,621 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159. 
2024-11-23T17:09:39,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-23T17:09:39,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:09:39,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:09:39,621 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:09:39,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@730543a4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:09:39,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:09:39,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:09:39,623 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:09:39,623 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:09:39,623 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:09:39,623 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23721e85, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:09:39,623 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to
use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:09:39,623 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:09:39,623 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:09:39,624 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48492, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:09:39,624 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38a9243b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:09:39,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:09:39,625 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:09:39,626 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:09:39,627 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37010, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:09:39,628 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce., hostname=1a7c5470b472,37875,1732381534675, seqNum=2] 2024-11-23T17:09:39,629 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159. 
2024-11-23T17:09:39,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555)
    at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535)
    at jdk.internal.reflect.GeneratedMethodAccessor224.invoke(Unknown Source)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39)
    at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-23T17:09:39,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:09:39,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:09:39,630 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:09:39,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-23T17:09:39,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot...
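The request being validated above ({ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }) is what the client-side Admin API produces for a flush snapshot. A minimal, hedged sketch of the calling side follows; the admin handle and the surrounding error handling are assumed, not taken from the test source.

    // Hypothetical sketch: take and later delete a FLUSH snapshot like the one validated above.
    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public final class SnapshotSketch {
      static void takeAndDrop(Admin admin) throws IOException {
        TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
        // Blocks until the SnapshotProcedure on the master completes.
        admin.snapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion", tn, SnapshotType.FLUSH);
        // Mirrors the 'delete name: "emptySnaptb0-..."' master RPCs seen earlier in this log.
        admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion");
      }
    }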
2024-11-23T17:09:39,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=137, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-23T17:09:39,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 137 2024-11-23T17:09:39,633 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:09:39,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-11-23T17:09:39,634 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:09:39,637 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:09:39,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742179_1355 (size=215) 2024-11-23T17:09:39,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742179_1355 (size=215) 2024-11-23T17:09:39,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742179_1355 (size=215) 2024-11-23T17:09:39,647 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:09:39,647 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c6d43d02a62db34e972bce100a1c993f}, {pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 19dbb7b062c887aed1dc9076d6db561a}] 2024-11-23T17:09:39,648 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 19dbb7b062c887aed1dc9076d6db561a 2024-11-23T17:09:39,649 INFO [PEWorker-4 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c6d43d02a62db34e972bce100a1c993f 2024-11-23T17:09:39,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-11-23T17:09:39,801 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44711 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=139 2024-11-23T17:09:39,801 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43453 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=138 2024-11-23T17:09:39,801 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1732381778950.19dbb7b062c887aed1dc9076d6db561a. 2024-11-23T17:09:39,801 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1732381778950.c6d43d02a62db34e972bce100a1c993f. 2024-11-23T17:09:39,801 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.HRegion(2603): Flush status journal for 19dbb7b062c887aed1dc9076d6db561a: 2024-11-23T17:09:39,801 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.HRegion(2603): Flush status journal for c6d43d02a62db34e972bce100a1c993f: 2024-11-23T17:09:39,801 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1732381778950.19dbb7b062c887aed1dc9076d6db561a. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-23T17:09:39,801 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1732381778950.c6d43d02a62db34e972bce100a1c993f. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-23T17:09:39,801 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1732381778950.19dbb7b062c887aed1dc9076d6db561a.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T17:09:39,801 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:09:39,801 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1732381778950.c6d43d02a62db34e972bce100a1c993f.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T17:09:39,801 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:09:39,802 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:09:39,802 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:09:39,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742181_1357 (size=86) 2024-11-23T17:09:39,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742181_1357 (size=86) 2024-11-23T17:09:39,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742181_1357 (size=86) 2024-11-23T17:09:39,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1732381778950.c6d43d02a62db34e972bce100a1c993f. 2024-11-23T17:09:39,811 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=138 2024-11-23T17:09:39,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=138 2024-11-23T17:09:39,811 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region c6d43d02a62db34e972bce100a1c993f 2024-11-23T17:09:39,811 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c6d43d02a62db34e972bce100a1c993f 2024-11-23T17:09:39,814 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=138, ppid=137, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c6d43d02a62db34e972bce100a1c993f in 165 msec 2024-11-23T17:09:39,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742180_1356 (size=86) 2024-11-23T17:09:39,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742180_1356 (size=86) 2024-11-23T17:09:39,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742180_1356 (size=86) 2024-11-23T17:09:39,820 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1732381778950.19dbb7b062c887aed1dc9076d6db561a. 
2024-11-23T17:09:39,820 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-11-23T17:09:39,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=139 2024-11-23T17:09:39,820 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 19dbb7b062c887aed1dc9076d6db561a 2024-11-23T17:09:39,820 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 19dbb7b062c887aed1dc9076d6db561a 2024-11-23T17:09:39,823 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=139, resume processing ppid=137 2024-11-23T17:09:39,823 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:09:39,823 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=139, ppid=137, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 19dbb7b062c887aed1dc9076d6db561a in 174 msec 2024-11-23T17:09:39,824 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:09:39,825 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-23T17:09:39,825 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-23T17:09:39,825 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:09:39,825 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-23T17:09:39,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742182_1358 (size=78) 2024-11-23T17:09:39,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742182_1358 (size=78) 2024-11-23T17:09:39,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742182_1358 (size=78) 2024-11-23T17:09:39,836 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:09:39,836 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T17:09:39,836 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T17:09:39,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742183_1359 (size=713) 2024-11-23T17:09:39,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742183_1359 (size=713) 2024-11-23T17:09:39,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742183_1359 (size=713) 2024-11-23T17:09:39,856 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:09:39,861 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:09:39,861 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T17:09:39,863 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:09:39,863 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 137 2024-11-23T17:09:39,870 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=137, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 232 msec 2024-11-23T17:09:39,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-11-23T17:09:39,949 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-23T17:09:39,957 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43453 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1732381778950.c6d43d02a62db34e972bce100a1c993f. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:09:39,959 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44711 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1732381778950.19dbb7b062c887aed1dc9076d6db561a. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:09:39,961 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-23T17:09:39,964 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:09:39,964 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1732381778950.c6d43d02a62db34e972bce100a1c993f. 
2024-11-23T17:09:39,965 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:09:39,967 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-23T17:09:39,973 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-23T17:09:39,979 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-23T17:09:39,981 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-23T17:09:39,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732381779982 (current time:1732381779982). 2024-11-23T17:09:39,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:09:39,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-11-23T17:09:39,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:09:39,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@695d1d70, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:09:39,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:09:39,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:09:39,984 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:09:39,984 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:09:39,984 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:09:39,984 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72321d4c, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:09:39,984 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:09:39,985 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:09:39,985 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:09:39,986 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48504, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:09:39,986 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26358a30, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:09:39,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:09:39,987 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:09:39,988 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:09:39,989 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37016, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:09:39,990 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159. 
2024-11-23T17:09:39,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-23T17:09:39,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:09:39,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:09:39,990 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:09:39,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2613ade5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:09:39,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:09:39,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:09:39,991 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:09:39,991 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:09:39,992 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:09:39,992 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65e48b9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:09:39,992 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:09:39,992 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:09:39,992 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:09:39,993 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48536, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:09:39,993 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71b4dc5e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:09:39,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:09:39,995 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:09:39,996 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:09:39,997 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37020, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:09:39,999 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce., hostname=1a7c5470b472,37875,1732381534675, seqNum=2] 2024-11-23T17:09:40,000 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159. 
2024-11-23T17:09:40,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555)
    at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535)
    at jdk.internal.reflect.GeneratedMethodAccessor224.invoke(Unknown Source)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39)
    at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-23T17:09:40,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:09:40,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:09:40,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-23T17:09:40,001 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:09:40,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-23T17:09:40,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-23T17:09:40,004 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:09:40,005 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:09:40,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-11-23T17:09:40,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-11-23T17:09:40,010 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:09:40,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742184_1360 (size=210) 2024-11-23T17:09:40,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742184_1360 (size=210) 2024-11-23T17:09:40,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742184_1360 (size=210) 2024-11-23T17:09:40,033 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:09:40,034 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c6d43d02a62db34e972bce100a1c993f}, {pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 19dbb7b062c887aed1dc9076d6db561a}] 2024-11-23T17:09:40,035 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c6d43d02a62db34e972bce100a1c993f 2024-11-23T17:09:40,035 INFO [PEWorker-4 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 19dbb7b062c887aed1dc9076d6db561a 2024-11-23T17:09:40,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-11-23T17:09:40,187 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44711 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=142 2024-11-23T17:09:40,187 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1732381778950.19dbb7b062c887aed1dc9076d6db561a. 2024-11-23T17:09:40,188 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2902): Flushing 19dbb7b062c887aed1dc9076d6db561a 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-11-23T17:09:40,188 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43453 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=141 2024-11-23T17:09:40,188 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1732381778950.c6d43d02a62db34e972bce100a1c993f. 2024-11-23T17:09:40,188 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2902): Flushing c6d43d02a62db34e972bce100a1c993f 1/1 column families, dataSize=333 B heapSize=976 B 2024-11-23T17:09:40,217 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241123760edd42619a4271820cedd70bcc7b40_19dbb7b062c887aed1dc9076d6db561a is 71, key is 14255147d9c2b9b87ea87479cad2b65d/cf:q/1732381779959/Put/seqid=0 2024-11-23T17:09:40,219 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123a2be3c3c86b94da2bd039be84d7f45a5_c6d43d02a62db34e972bce100a1c993f is 71, key is 003296b8b45d025cbd50b3e7fa8a7595/cf:q/1732381779957/Put/seqid=0 2024-11-23T17:09:40,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742185_1361 (size=8031) 2024-11-23T17:09:40,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742185_1361 (size=8031) 2024-11-23T17:09:40,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742185_1361 (size=8031) 2024-11-23T17:09:40,234 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:09:40,239 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241123760edd42619a4271820cedd70bcc7b40_19dbb7b062c887aed1dc9076d6db561a to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241123760edd42619a4271820cedd70bcc7b40_19dbb7b062c887aed1dc9076d6db561a 2024-11-23T17:09:40,240 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/19dbb7b062c887aed1dc9076d6db561a/.tmp/cf/6e0925d7c7c244d38b12013dd4ff0c6a, store: [table=testtb-testExportFileSystemStateWithMergeRegion family=cf region=19dbb7b062c887aed1dc9076d6db561a] 2024-11-23T17:09:40,241 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/19dbb7b062c887aed1dc9076d6db561a/.tmp/cf/6e0925d7c7c244d38b12013dd4ff0c6a is 224, key is 1471fa01b1cff145931ae6abec1d75d74/cf:q/1732381779959/Put/seqid=0 2024-11-23T17:09:40,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742186_1362 (size=5241) 2024-11-23T17:09:40,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742186_1362 (size=5241) 2024-11-23T17:09:40,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742186_1362 (size=5241) 2024-11-23T17:09:40,247 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:09:40,253 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123a2be3c3c86b94da2bd039be84d7f45a5_c6d43d02a62db34e972bce100a1c993f to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241123a2be3c3c86b94da2bd039be84d7f45a5_c6d43d02a62db34e972bce100a1c993f 2024-11-23T17:09:40,254 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/c6d43d02a62db34e972bce100a1c993f/.tmp/cf/a9d4bcb30b774928a8734eca1d1b57cd, store: [table=testtb-testExportFileSystemStateWithMergeRegion family=cf region=c6d43d02a62db34e972bce100a1c993f] 2024-11-23T17:09:40,255 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/c6d43d02a62db34e972bce100a1c993f/.tmp/cf/a9d4bcb30b774928a8734eca1d1b57cd is 224, key is 05d4912c1ef02cb1871cdfb926360be17/cf:q/1732381779957/Put/seqid=0 2024-11-23T17:09:40,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742188_1364 (size=6416) 2024-11-23T17:09:40,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742188_1364 (size=6416) 2024-11-23T17:09:40,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742188_1364 (size=6416) 2024-11-23T17:09:40,263 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=333, hasBloomFilter=true, into tmp file hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/c6d43d02a62db34e972bce100a1c993f/.tmp/cf/a9d4bcb30b774928a8734eca1d1b57cd 2024-11-23T17:09:40,277 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/c6d43d02a62db34e972bce100a1c993f/.tmp/cf/a9d4bcb30b774928a8734eca1d1b57cd as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/c6d43d02a62db34e972bce100a1c993f/cf/a9d4bcb30b774928a8734eca1d1b57cd 2024-11-23T17:09:40,283 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/c6d43d02a62db34e972bce100a1c993f/cf/a9d4bcb30b774928a8734eca1d1b57cd, entries=5, sequenceid=6, filesize=6.3 K 2024-11-23T17:09:40,285 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for c6d43d02a62db34e972bce100a1c993f in 96ms, sequenceid=6, compaction requested=false 2024-11-23T17:09:40,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-11-23T17:09:40,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] 
regionserver.HRegion(2603): Flush status journal for c6d43d02a62db34e972bce100a1c993f: 2024-11-23T17:09:40,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1732381778950.c6d43d02a62db34e972bce100a1c993f. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-23T17:09:40,286 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1732381778950.c6d43d02a62db34e972bce100a1c993f.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T17:09:40,286 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:09:40,286 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/c6d43d02a62db34e972bce100a1c993f/cf/a9d4bcb30b774928a8734eca1d1b57cd] hfiles 2024-11-23T17:09:40,286 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/c6d43d02a62db34e972bce100a1c993f/cf/a9d4bcb30b774928a8734eca1d1b57cd for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T17:09:40,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742187_1363 (size=15277) 2024-11-23T17:09:40,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742187_1363 (size=15277) 2024-11-23T17:09:40,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742187_1363 (size=15277) 2024-11-23T17:09:40,306 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/19dbb7b062c887aed1dc9076d6db561a/.tmp/cf/6e0925d7c7c244d38b12013dd4ff0c6a 2024-11-23T17:09:40,312 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/19dbb7b062c887aed1dc9076d6db561a/.tmp/cf/6e0925d7c7c244d38b12013dd4ff0c6a as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/19dbb7b062c887aed1dc9076d6db561a/cf/6e0925d7c7c244d38b12013dd4ff0c6a 2024-11-23T17:09:40,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:44705 is added to blk_1073742189_1365 (size=125) 2024-11-23T17:09:40,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742189_1365 (size=125) 2024-11-23T17:09:40,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742189_1365 (size=125) 2024-11-23T17:09:40,318 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1732381778950.c6d43d02a62db34e972bce100a1c993f. 2024-11-23T17:09:40,319 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-23T17:09:40,319 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/19dbb7b062c887aed1dc9076d6db561a/cf/6e0925d7c7c244d38b12013dd4ff0c6a, entries=45, sequenceid=6, filesize=14.9 K 2024-11-23T17:09:40,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=141 2024-11-23T17:09:40,319 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region c6d43d02a62db34e972bce100a1c993f 2024-11-23T17:09:40,319 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c6d43d02a62db34e972bce100a1c993f 2024-11-23T17:09:40,320 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 19dbb7b062c887aed1dc9076d6db561a in 132ms, sequenceid=6, compaction requested=false 2024-11-23T17:09:40,320 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2603): Flush status journal for 19dbb7b062c887aed1dc9076d6db561a: 2024-11-23T17:09:40,320 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1732381778950.19dbb7b062c887aed1dc9076d6db561a. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-23T17:09:40,320 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1732381778950.19dbb7b062c887aed1dc9076d6db561a.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T17:09:40,320 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:09:40,320 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/19dbb7b062c887aed1dc9076d6db561a/cf/6e0925d7c7c244d38b12013dd4ff0c6a] hfiles 2024-11-23T17:09:40,320 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/19dbb7b062c887aed1dc9076d6db561a/cf/6e0925d7c7c244d38b12013dd4ff0c6a for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T17:09:40,322 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=141, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c6d43d02a62db34e972bce100a1c993f in 287 msec 2024-11-23T17:09:40,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-11-23T17:09:40,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742190_1366 (size=125) 2024-11-23T17:09:40,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742190_1366 (size=125) 2024-11-23T17:09:40,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742190_1366 (size=125) 2024-11-23T17:09:40,331 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1732381778950.19dbb7b062c887aed1dc9076d6db561a. 
2024-11-23T17:09:40,331 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=142 2024-11-23T17:09:40,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=142 2024-11-23T17:09:40,331 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 19dbb7b062c887aed1dc9076d6db561a 2024-11-23T17:09:40,332 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 19dbb7b062c887aed1dc9076d6db561a 2024-11-23T17:09:40,334 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=142, resume processing ppid=140 2024-11-23T17:09:40,334 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=142, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 19dbb7b062c887aed1dc9076d6db561a in 299 msec 2024-11-23T17:09:40,334 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:09:40,335 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:09:40,336 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-23T17:09:40,336 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-23T17:09:40,336 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:09:40,337 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241123760edd42619a4271820cedd70bcc7b40_19dbb7b062c887aed1dc9076d6db561a, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241123a2be3c3c86b94da2bd039be84d7f45a5_c6d43d02a62db34e972bce100a1c993f] hfiles 2024-11-23T17:09:40,337 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241123760edd42619a4271820cedd70bcc7b40_19dbb7b062c887aed1dc9076d6db561a 2024-11-23T17:09:40,337 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241123a2be3c3c86b94da2bd039be84d7f45a5_c6d43d02a62db34e972bce100a1c993f 2024-11-23T17:09:40,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742191_1367 (size=309) 2024-11-23T17:09:40,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742191_1367 (size=309) 2024-11-23T17:09:40,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742191_1367 (size=309) 2024-11-23T17:09:40,352 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:09:40,352 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T17:09:40,353 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T17:09:40,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742192_1368 (size=1023) 2024-11-23T17:09:40,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:44705 is added to blk_1073742192_1368 (size=1023) 2024-11-23T17:09:40,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742192_1368 (size=1023) 2024-11-23T17:09:40,386 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:09:40,401 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:09:40,401 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T17:09:40,403 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:09:40,403 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-11-23T17:09:40,404 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=140, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 401 msec 2024-11-23T17:09:40,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-11-23T17:09:40,638 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-23T17:09:40,640 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T17:09:40,641 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T17:09:40,642 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T17:09:40,643 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41292, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T17:09:40,643 INFO 
[MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54118, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T17:09:40,643 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57576, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T17:09:40,645 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T17:09:40,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:09:40,647 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T17:09:40,647 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:09:40,647 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 143 2024-11-23T17:09:40,648 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T17:09:40,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-11-23T17:09:40,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742193_1369 (size=399) 2024-11-23T17:09:40,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742193_1369 (size=399) 2024-11-23T17:09:40,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742193_1369 (size=399) 2024-11-23T17:09:40,657 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 5558f6bbdfa2afa0f1fb6712bb9ba0f1, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780645.5558f6bbdfa2afa0f1fb6712bb9ba0f1.', STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:09:40,658 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => f24d47972914c5327c02f79c10022d00, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1732381780645.f24d47972914c5327c02f79c10022d00.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:09:40,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742194_1370 (size=85) 2024-11-23T17:09:40,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742195_1371 (size=85) 2024-11-23T17:09:40,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742195_1371 (size=85) 2024-11-23T17:09:40,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742195_1371 (size=85) 2024-11-23T17:09:40,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742194_1370 (size=85) 2024-11-23T17:09:40,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742194_1370 (size=85) 2024-11-23T17:09:40,671 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1732381780645.f24d47972914c5327c02f79c10022d00.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:09:40,671 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1722): Closing f24d47972914c5327c02f79c10022d00, disabling compactions & flushes 2024-11-23T17:09:40,671 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1732381780645.f24d47972914c5327c02f79c10022d00. 2024-11-23T17:09:40,671 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1732381780645.f24d47972914c5327c02f79c10022d00. 
2024-11-23T17:09:40,671 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1732381780645.f24d47972914c5327c02f79c10022d00. after waiting 0 ms 2024-11-23T17:09:40,671 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1732381780645.f24d47972914c5327c02f79c10022d00. 2024-11-23T17:09:40,671 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1732381780645.f24d47972914c5327c02f79c10022d00. 2024-11-23T17:09:40,672 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1676): Region close journal for f24d47972914c5327c02f79c10022d00: Waiting for close lock at 1732381780671Disabling compacts and flushes for region at 1732381780671Disabling writes for close at 1732381780671Writing region close event to WAL at 1732381780671Closed at 1732381780671 2024-11-23T17:09:40,672 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780645.5558f6bbdfa2afa0f1fb6712bb9ba0f1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:09:40,672 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1722): Closing 5558f6bbdfa2afa0f1fb6712bb9ba0f1, disabling compactions & flushes 2024-11-23T17:09:40,672 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780645.5558f6bbdfa2afa0f1fb6712bb9ba0f1. 2024-11-23T17:09:40,672 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780645.5558f6bbdfa2afa0f1fb6712bb9ba0f1. 2024-11-23T17:09:40,672 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780645.5558f6bbdfa2afa0f1fb6712bb9ba0f1. after waiting 0 ms 2024-11-23T17:09:40,672 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780645.5558f6bbdfa2afa0f1fb6712bb9ba0f1. 2024-11-23T17:09:40,672 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780645.5558f6bbdfa2afa0f1fb6712bb9ba0f1. 
2024-11-23T17:09:40,672 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1676): Region close journal for 5558f6bbdfa2afa0f1fb6712bb9ba0f1: Waiting for close lock at 1732381780672Disabling compacts and flushes for region at 1732381780672Disabling writes for close at 1732381780672Writing region close event to WAL at 1732381780672Closed at 1732381780672 2024-11-23T17:09:40,673 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T17:09:40,674 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1732381780645.f24d47972914c5327c02f79c10022d00.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1732381780674"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732381780674"}]},"ts":"1732381780674"} 2024-11-23T17:09:40,674 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780645.5558f6bbdfa2afa0f1fb6712bb9ba0f1.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1732381780674"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732381780674"}]},"ts":"1732381780674"} 2024-11-23T17:09:40,677 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-23T17:09:40,678 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T17:09:40,679 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381780678"}]},"ts":"1732381780678"} 2024-11-23T17:09:40,680 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-11-23T17:09:40,681 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {1a7c5470b472=0} racks are {/default-rack=0} 2024-11-23T17:09:40,682 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T17:09:40,682 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T17:09:40,682 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T17:09:40,682 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T17:09:40,682 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T17:09:40,682 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T17:09:40,682 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T17:09:40,682 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T17:09:40,682 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T17:09:40,682 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T17:09:40,683 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=5558f6bbdfa2afa0f1fb6712bb9ba0f1, ASSIGN}, {pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=f24d47972914c5327c02f79c10022d00, ASSIGN}] 2024-11-23T17:09:40,684 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=f24d47972914c5327c02f79c10022d00, ASSIGN 2024-11-23T17:09:40,684 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=5558f6bbdfa2afa0f1fb6712bb9ba0f1, ASSIGN 2024-11-23T17:09:40,685 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=f24d47972914c5327c02f79c10022d00, ASSIGN; state=OFFLINE, location=1a7c5470b472,43453,1732381534825; forceNewPlan=false, retain=false 2024-11-23T17:09:40,685 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=5558f6bbdfa2afa0f1fb6712bb9ba0f1, ASSIGN; state=OFFLINE, location=1a7c5470b472,44711,1732381534872; forceNewPlan=false, retain=false 2024-11-23T17:09:40,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-11-23T17:09:40,835 INFO [1a7c5470b472:46159 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-23T17:09:40,836 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=5558f6bbdfa2afa0f1fb6712bb9ba0f1, regionState=OPENING, regionLocation=1a7c5470b472,44711,1732381534872 2024-11-23T17:09:40,836 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=145 updating hbase:meta row=f24d47972914c5327c02f79c10022d00, regionState=OPENING, regionLocation=1a7c5470b472,43453,1732381534825 2024-11-23T17:09:40,838 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=5558f6bbdfa2afa0f1fb6712bb9ba0f1, ASSIGN because future has completed 2024-11-23T17:09:40,838 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=146, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5558f6bbdfa2afa0f1fb6712bb9ba0f1, server=1a7c5470b472,44711,1732381534872}] 2024-11-23T17:09:40,839 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=f24d47972914c5327c02f79c10022d00, ASSIGN because future has completed 2024-11-23T17:09:40,839 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=147, ppid=145, state=RUNNABLE, hasLock=false; OpenRegionProcedure f24d47972914c5327c02f79c10022d00, server=1a7c5470b472,43453,1732381534825}] 2024-11-23T17:09:40,893 DEBUG [master/1a7c5470b472:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region c6d43d02a62db34e972bce100a1c993f changed from -1.0 to 0.0, refreshing cache 2024-11-23T17:09:40,893 DEBUG [master/1a7c5470b472:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 19dbb7b062c887aed1dc9076d6db561a changed from -1.0 to 0.0, refreshing cache 2024-11-23T17:09:40,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-11-23T17:09:40,994 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780645.5558f6bbdfa2afa0f1fb6712bb9ba0f1. 2024-11-23T17:09:40,994 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1732381780645.f24d47972914c5327c02f79c10022d00. 
2024-11-23T17:09:40,994 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7752): Opening region: {ENCODED => 5558f6bbdfa2afa0f1fb6712bb9ba0f1, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780645.5558f6bbdfa2afa0f1fb6712bb9ba0f1.', STARTKEY => '', ENDKEY => '2'} 2024-11-23T17:09:40,994 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7752): Opening region: {ENCODED => f24d47972914c5327c02f79c10022d00, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1732381780645.f24d47972914c5327c02f79c10022d00.', STARTKEY => '2', ENDKEY => ''} 2024-11-23T17:09:40,994 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780645.5558f6bbdfa2afa0f1fb6712bb9ba0f1. service=AccessControlService 2024-11-23T17:09:40,994 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1732381780645.f24d47972914c5327c02f79c10022d00. service=AccessControlService 2024-11-23T17:09:40,994 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:09:40,995 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T17:09:40,995 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 5558f6bbdfa2afa0f1fb6712bb9ba0f1 2024-11-23T17:09:40,995 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780645.5558f6bbdfa2afa0f1fb6712bb9ba0f1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:09:40,995 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 f24d47972914c5327c02f79c10022d00 2024-11-23T17:09:40,995 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1732381780645.f24d47972914c5327c02f79c10022d00.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:09:40,995 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7794): checking encryption for 5558f6bbdfa2afa0f1fb6712bb9ba0f1 2024-11-23T17:09:40,995 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7797): checking classloading for 5558f6bbdfa2afa0f1fb6712bb9ba0f1 2024-11-23T17:09:40,995 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7794): checking encryption for f24d47972914c5327c02f79c10022d00 2024-11-23T17:09:40,995 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7797): checking classloading for f24d47972914c5327c02f79c10022d00 2024-11-23T17:09:40,996 INFO [StoreOpener-5558f6bbdfa2afa0f1fb6712bb9ba0f1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5558f6bbdfa2afa0f1fb6712bb9ba0f1 2024-11-23T17:09:40,996 INFO [StoreOpener-f24d47972914c5327c02f79c10022d00-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f24d47972914c5327c02f79c10022d00 2024-11-23T17:09:40,998 INFO [StoreOpener-f24d47972914c5327c02f79c10022d00-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f24d47972914c5327c02f79c10022d00 columnFamilyName cf 2024-11-23T17:09:40,998 DEBUG [StoreOpener-f24d47972914c5327c02f79c10022d00-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:09:40,999 INFO [StoreOpener-5558f6bbdfa2afa0f1fb6712bb9ba0f1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5558f6bbdfa2afa0f1fb6712bb9ba0f1 columnFamilyName cf 2024-11-23T17:09:40,999 INFO [StoreOpener-f24d47972914c5327c02f79c10022d00-1 {}] regionserver.HStore(327): Store=f24d47972914c5327c02f79c10022d00/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:09:40,999 DEBUG [StoreOpener-5558f6bbdfa2afa0f1fb6712bb9ba0f1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:09:40,999 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1038): replaying wal for f24d47972914c5327c02f79c10022d00 2024-11-23T17:09:40,999 INFO [StoreOpener-5558f6bbdfa2afa0f1fb6712bb9ba0f1-1 {}] regionserver.HStore(327): Store=5558f6bbdfa2afa0f1fb6712bb9ba0f1/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:09:40,999 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1038): replaying wal for 5558f6bbdfa2afa0f1fb6712bb9ba0f1 2024-11-23T17:09:41,000 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f24d47972914c5327c02f79c10022d00 2024-11-23T17:09:41,000 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5558f6bbdfa2afa0f1fb6712bb9ba0f1 2024-11-23T17:09:41,000 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f24d47972914c5327c02f79c10022d00 2024-11-23T17:09:41,000 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 
{event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5558f6bbdfa2afa0f1fb6712bb9ba0f1 2024-11-23T17:09:41,000 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1048): stopping wal replay for f24d47972914c5327c02f79c10022d00 2024-11-23T17:09:41,000 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1060): Cleaning up temporary data for f24d47972914c5327c02f79c10022d00 2024-11-23T17:09:41,000 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1048): stopping wal replay for 5558f6bbdfa2afa0f1fb6712bb9ba0f1 2024-11-23T17:09:41,001 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1060): Cleaning up temporary data for 5558f6bbdfa2afa0f1fb6712bb9ba0f1 2024-11-23T17:09:41,002 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1093): writing seq id for f24d47972914c5327c02f79c10022d00 2024-11-23T17:09:41,002 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1093): writing seq id for 5558f6bbdfa2afa0f1fb6712bb9ba0f1 2024-11-23T17:09:41,005 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f24d47972914c5327c02f79c10022d00/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:09:41,005 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5558f6bbdfa2afa0f1fb6712bb9ba0f1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:09:41,005 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1114): Opened f24d47972914c5327c02f79c10022d00; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64420924, jitterRate=-0.04005342721939087}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:09:41,005 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f24d47972914c5327c02f79c10022d00 2024-11-23T17:09:41,005 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1114): Opened 5558f6bbdfa2afa0f1fb6712bb9ba0f1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60090391, jitterRate=-0.10458339750766754}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:09:41,006 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 
5558f6bbdfa2afa0f1fb6712bb9ba0f1 2024-11-23T17:09:41,006 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1006): Region open journal for 5558f6bbdfa2afa0f1fb6712bb9ba0f1: Running coprocessor pre-open hook at 1732381780995Writing region info on filesystem at 1732381780995Initializing all the Stores at 1732381780996 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381780996Cleaning up temporary data from old regions at 1732381781001 (+5 ms)Running coprocessor post-open hooks at 1732381781006 (+5 ms)Region opened successfully at 1732381781006 2024-11-23T17:09:41,006 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1006): Region open journal for f24d47972914c5327c02f79c10022d00: Running coprocessor pre-open hook at 1732381780995Writing region info on filesystem at 1732381780995Initializing all the Stores at 1732381780996 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381780996Cleaning up temporary data from old regions at 1732381781001 (+5 ms)Running coprocessor post-open hooks at 1732381781005 (+4 ms)Region opened successfully at 1732381781006 (+1 ms) 2024-11-23T17:09:41,007 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1732381780645.f24d47972914c5327c02f79c10022d00., pid=147, masterSystemTime=1732381780991 2024-11-23T17:09:41,007 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780645.5558f6bbdfa2afa0f1fb6712bb9ba0f1., pid=146, masterSystemTime=1732381780990 2024-11-23T17:09:41,009 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1732381780645.f24d47972914c5327c02f79c10022d00. 2024-11-23T17:09:41,009 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1732381780645.f24d47972914c5327c02f79c10022d00. 2024-11-23T17:09:41,010 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=145 updating hbase:meta row=f24d47972914c5327c02f79c10022d00, regionState=OPEN, openSeqNum=2, regionLocation=1a7c5470b472,43453,1732381534825 2024-11-23T17:09:41,011 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780645.5558f6bbdfa2afa0f1fb6712bb9ba0f1. 
2024-11-23T17:09:41,011 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780645.5558f6bbdfa2afa0f1fb6712bb9ba0f1. 2024-11-23T17:09:41,011 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=5558f6bbdfa2afa0f1fb6712bb9ba0f1, regionState=OPEN, openSeqNum=2, regionLocation=1a7c5470b472,44711,1732381534872 2024-11-23T17:09:41,012 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=147, ppid=145, state=RUNNABLE, hasLock=false; OpenRegionProcedure f24d47972914c5327c02f79c10022d00, server=1a7c5470b472,43453,1732381534825 because future has completed 2024-11-23T17:09:41,013 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=146, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5558f6bbdfa2afa0f1fb6712bb9ba0f1, server=1a7c5470b472,44711,1732381534872 because future has completed 2024-11-23T17:09:41,015 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=147, resume processing ppid=145 2024-11-23T17:09:41,015 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=147, ppid=145, state=SUCCESS, hasLock=false; OpenRegionProcedure f24d47972914c5327c02f79c10022d00, server=1a7c5470b472,43453,1732381534825 in 173 msec 2024-11-23T17:09:41,016 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=146, resume processing ppid=144 2024-11-23T17:09:41,016 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=145, ppid=143, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=f24d47972914c5327c02f79c10022d00, ASSIGN in 332 msec 2024-11-23T17:09:41,016 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=146, ppid=144, state=SUCCESS, hasLock=false; OpenRegionProcedure 5558f6bbdfa2afa0f1fb6712bb9ba0f1, server=1a7c5470b472,44711,1732381534872 in 176 msec 2024-11-23T17:09:41,018 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=144, resume processing ppid=143 2024-11-23T17:09:41,018 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=144, ppid=143, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=5558f6bbdfa2afa0f1fb6712bb9ba0f1, ASSIGN in 333 msec 2024-11-23T17:09:41,019 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T17:09:41,019 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381781019"}]},"ts":"1732381781019"} 2024-11-23T17:09:41,021 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-11-23T17:09:41,023 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute 
state=CREATE_TABLE_POST_OPERATION 2024-11-23T17:09:41,024 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: RWXCA 2024-11-23T17:09:41,027 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37875 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-11-23T17:09:41,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:09:41,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:09:41,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:09:41,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:09:41,031 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:09:41,032 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-23T17:09:41,032 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:09:41,032 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-23T17:09:41,033 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:09:41,033 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-23T17:09:41,033 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:09:41,033 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-23T17:09:41,035 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=143, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 387 msec 2024-11-23T17:09:41,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-11-23T17:09:41,278 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-23T17:09:41,283 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='1', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780645.5558f6bbdfa2afa0f1fb6712bb9ba0f1., hostname=1a7c5470b472,44711,1732381534872, seqNum=2] 2024-11-23T17:09:41,288 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='2', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1732381780645.f24d47972914c5327c02f79c10022d00., hostname=1a7c5470b472,43453,1732381534825, seqNum=2] 2024-11-23T17:09:41,290 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion-1,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion-1 ,, for max=2147483647 with caching=100 2024-11-23T17:09:41,304 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster$2(2278): Client=jenkins//172.17.0.2 merge regions [5558f6bbdfa2afa0f1fb6712bb9ba0f1, f24d47972914c5327c02f79c10022d00] 2024-11-23T17:09:41,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[5558f6bbdfa2afa0f1fb6712bb9ba0f1, f24d47972914c5327c02f79c10022d00], force=true 2024-11-23T17:09:41,310 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[5558f6bbdfa2afa0f1fb6712bb9ba0f1, f24d47972914c5327c02f79c10022d00], force=true 2024-11-23T17:09:41,310 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[5558f6bbdfa2afa0f1fb6712bb9ba0f1, f24d47972914c5327c02f79c10022d00], force=true 2024-11-23T17:09:41,310 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, 
state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[5558f6bbdfa2afa0f1fb6712bb9ba0f1, f24d47972914c5327c02f79c10022d00], force=true 2024-11-23T17:09:41,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-11-23T17:09:41,321 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=5558f6bbdfa2afa0f1fb6712bb9ba0f1, UNASSIGN}, {pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=f24d47972914c5327c02f79c10022d00, UNASSIGN}] 2024-11-23T17:09:41,322 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=5558f6bbdfa2afa0f1fb6712bb9ba0f1, UNASSIGN 2024-11-23T17:09:41,322 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=f24d47972914c5327c02f79c10022d00, UNASSIGN 2024-11-23T17:09:41,323 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=149 updating hbase:meta row=5558f6bbdfa2afa0f1fb6712bb9ba0f1, regionState=CLOSING, regionLocation=1a7c5470b472,44711,1732381534872 2024-11-23T17:09:41,323 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=150 updating hbase:meta row=f24d47972914c5327c02f79c10022d00, regionState=CLOSING, regionLocation=1a7c5470b472,43453,1732381534825 2024-11-23T17:09:41,326 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=5558f6bbdfa2afa0f1fb6712bb9ba0f1, UNASSIGN because future has completed 2024-11-23T17:09:41,326 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-23T17:09:41,326 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=151, ppid=149, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5558f6bbdfa2afa0f1fb6712bb9ba0f1, server=1a7c5470b472,44711,1732381534872}] 2024-11-23T17:09:41,326 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=f24d47972914c5327c02f79c10022d00, UNASSIGN because future has completed 2024-11-23T17:09:41,328 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-23T17:09:41,328 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=152, ppid=150, state=RUNNABLE, hasLock=false; CloseRegionProcedure 
f24d47972914c5327c02f79c10022d00, server=1a7c5470b472,43453,1732381534825}] 2024-11-23T17:09:41,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-11-23T17:09:41,479 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(122): Close 5558f6bbdfa2afa0f1fb6712bb9ba0f1 2024-11-23T17:09:41,479 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-23T17:09:41,479 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1722): Closing 5558f6bbdfa2afa0f1fb6712bb9ba0f1, disabling compactions & flushes 2024-11-23T17:09:41,479 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780645.5558f6bbdfa2afa0f1fb6712bb9ba0f1. 2024-11-23T17:09:41,479 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780645.5558f6bbdfa2afa0f1fb6712bb9ba0f1. 2024-11-23T17:09:41,479 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780645.5558f6bbdfa2afa0f1fb6712bb9ba0f1. after waiting 0 ms 2024-11-23T17:09:41,479 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780645.5558f6bbdfa2afa0f1fb6712bb9ba0f1. 2024-11-23T17:09:41,479 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(2902): Flushing 5558f6bbdfa2afa0f1fb6712bb9ba0f1 1/1 column families, dataSize=24 B heapSize=352 B 2024-11-23T17:09:41,480 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(122): Close f24d47972914c5327c02f79c10022d00 2024-11-23T17:09:41,481 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-23T17:09:41,481 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1722): Closing f24d47972914c5327c02f79c10022d00, disabling compactions & flushes 2024-11-23T17:09:41,481 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1732381780645.f24d47972914c5327c02f79c10022d00. 2024-11-23T17:09:41,481 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1732381780645.f24d47972914c5327c02f79c10022d00. 
2024-11-23T17:09:41,481 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1732381780645.f24d47972914c5327c02f79c10022d00. after waiting 0 ms 2024-11-23T17:09:41,481 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1732381780645.f24d47972914c5327c02f79c10022d00. 2024-11-23T17:09:41,481 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(2902): Flushing f24d47972914c5327c02f79c10022d00 1/1 column families, dataSize=24 B heapSize=352 B 2024-11-23T17:09:41,503 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f24d47972914c5327c02f79c10022d00/.tmp/cf/69887fd8068246c4b1fe914aa21b8510 is 28, key is 2/cf:/1732381781289/Put/seqid=0 2024-11-23T17:09:41,504 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5558f6bbdfa2afa0f1fb6712bb9ba0f1/.tmp/cf/90dbb359e9614fc4b58e7d28d9d868f6 is 28, key is 1/cf:/1732381781284/Put/seqid=0 2024-11-23T17:09:41,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742197_1373 (size=4945) 2024-11-23T17:09:41,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742196_1372 (size=4945) 2024-11-23T17:09:41,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742196_1372 (size=4945) 2024-11-23T17:09:41,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742197_1373 (size=4945) 2024-11-23T17:09:41,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742197_1373 (size=4945) 2024-11-23T17:09:41,517 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f24d47972914c5327c02f79c10022d00/.tmp/cf/69887fd8068246c4b1fe914aa21b8510 2024-11-23T17:09:41,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742196_1372 (size=4945) 2024-11-23T17:09:41,518 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), 
to=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5558f6bbdfa2afa0f1fb6712bb9ba0f1/.tmp/cf/90dbb359e9614fc4b58e7d28d9d868f6 2024-11-23T17:09:41,524 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f24d47972914c5327c02f79c10022d00/.tmp/cf/69887fd8068246c4b1fe914aa21b8510 as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f24d47972914c5327c02f79c10022d00/cf/69887fd8068246c4b1fe914aa21b8510 2024-11-23T17:09:41,529 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5558f6bbdfa2afa0f1fb6712bb9ba0f1/.tmp/cf/90dbb359e9614fc4b58e7d28d9d868f6 as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5558f6bbdfa2afa0f1fb6712bb9ba0f1/cf/90dbb359e9614fc4b58e7d28d9d868f6 2024-11-23T17:09:41,536 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f24d47972914c5327c02f79c10022d00/cf/69887fd8068246c4b1fe914aa21b8510, entries=1, sequenceid=5, filesize=4.8 K 2024-11-23T17:09:41,535 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5558f6bbdfa2afa0f1fb6712bb9ba0f1/cf/90dbb359e9614fc4b58e7d28d9d868f6, entries=1, sequenceid=5, filesize=4.8 K 2024-11-23T17:09:41,538 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for f24d47972914c5327c02f79c10022d00 in 56ms, sequenceid=5, compaction requested=false 2024-11-23T17:09:41,538 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 5558f6bbdfa2afa0f1fb6712bb9ba0f1 in 59ms, sequenceid=5, compaction requested=false 2024-11-23T17:09:41,538 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-11-23T17:09:41,538 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-11-23T17:09:41,565 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] wal.WALSplitUtil(410): 
Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5558f6bbdfa2afa0f1fb6712bb9ba0f1/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-23T17:09:41,565 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f24d47972914c5327c02f79c10022d00/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-23T17:09:41,566 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:09:41,566 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1732381780645.f24d47972914c5327c02f79c10022d00. 2024-11-23T17:09:41,566 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:09:41,566 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1676): Region close journal for f24d47972914c5327c02f79c10022d00: Waiting for close lock at 1732381781481Running coprocessor pre-close hooks at 1732381781481Disabling compacts and flushes for region at 1732381781481Disabling writes for close at 1732381781481Obtaining lock to block concurrent updates at 1732381781481Preparing flush snapshotting stores in f24d47972914c5327c02f79c10022d00 at 1732381781481Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,2,1732381780645.f24d47972914c5327c02f79c10022d00., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1732381781481Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,2,1732381780645.f24d47972914c5327c02f79c10022d00. at 1732381781482 (+1 ms)Flushing f24d47972914c5327c02f79c10022d00/cf: creating writer at 1732381781482Flushing f24d47972914c5327c02f79c10022d00/cf: appending metadata at 1732381781503 (+21 ms)Flushing f24d47972914c5327c02f79c10022d00/cf: closing flushed file at 1732381781503Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2adeb426: reopening flushed file at 1732381781523 (+20 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for f24d47972914c5327c02f79c10022d00 in 56ms, sequenceid=5, compaction requested=false at 1732381781538 (+15 ms)Writing region close event to WAL at 1732381781562 (+24 ms)Running coprocessor post-close hooks at 1732381781566 (+4 ms)Closed at 1732381781566 2024-11-23T17:09:41,566 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780645.5558f6bbdfa2afa0f1fb6712bb9ba0f1. 
2024-11-23T17:09:41,566 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1676): Region close journal for 5558f6bbdfa2afa0f1fb6712bb9ba0f1: Waiting for close lock at 1732381781479Running coprocessor pre-close hooks at 1732381781479Disabling compacts and flushes for region at 1732381781479Disabling writes for close at 1732381781479Obtaining lock to block concurrent updates at 1732381781479Preparing flush snapshotting stores in 5558f6bbdfa2afa0f1fb6712bb9ba0f1 at 1732381781479Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780645.5558f6bbdfa2afa0f1fb6712bb9ba0f1., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1732381781480 (+1 ms)Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780645.5558f6bbdfa2afa0f1fb6712bb9ba0f1. at 1732381781480Flushing 5558f6bbdfa2afa0f1fb6712bb9ba0f1/cf: creating writer at 1732381781481 (+1 ms)Flushing 5558f6bbdfa2afa0f1fb6712bb9ba0f1/cf: appending metadata at 1732381781503 (+22 ms)Flushing 5558f6bbdfa2afa0f1fb6712bb9ba0f1/cf: closing flushed file at 1732381781503Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f19eefe: reopening flushed file at 1732381781529 (+26 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 5558f6bbdfa2afa0f1fb6712bb9ba0f1 in 59ms, sequenceid=5, compaction requested=false at 1732381781538 (+9 ms)Writing region close event to WAL at 1732381781561 (+23 ms)Running coprocessor post-close hooks at 1732381781566 (+5 ms)Closed at 1732381781566 2024-11-23T17:09:41,568 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(157): Closed 5558f6bbdfa2afa0f1fb6712bb9ba0f1 2024-11-23T17:09:41,569 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(157): Closed f24d47972914c5327c02f79c10022d00 2024-11-23T17:09:41,570 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=150 updating hbase:meta row=f24d47972914c5327c02f79c10022d00, regionState=CLOSED 2024-11-23T17:09:41,570 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=149 updating hbase:meta row=5558f6bbdfa2afa0f1fb6712bb9ba0f1, regionState=CLOSED 2024-11-23T17:09:41,574 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=152, ppid=150, state=RUNNABLE, hasLock=false; CloseRegionProcedure f24d47972914c5327c02f79c10022d00, server=1a7c5470b472,43453,1732381534825 because future has completed 2024-11-23T17:09:41,576 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=151, ppid=149, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5558f6bbdfa2afa0f1fb6712bb9ba0f1, server=1a7c5470b472,44711,1732381534872 because future has completed 2024-11-23T17:09:41,583 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=152, resume processing ppid=150 2024-11-23T17:09:41,583 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=152, ppid=150, state=SUCCESS, hasLock=false; CloseRegionProcedure f24d47972914c5327c02f79c10022d00, server=1a7c5470b472,43453,1732381534825 in 252 msec 2024-11-23T17:09:41,585 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=151, resume processing ppid=149 
2024-11-23T17:09:41,585 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=151, ppid=149, state=SUCCESS, hasLock=false; CloseRegionProcedure 5558f6bbdfa2afa0f1fb6712bb9ba0f1, server=1a7c5470b472,44711,1732381534872 in 254 msec 2024-11-23T17:09:41,585 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=150, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=f24d47972914c5327c02f79c10022d00, UNASSIGN in 262 msec 2024-11-23T17:09:41,587 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=149, resume processing ppid=148 2024-11-23T17:09:41,587 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=149, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=5558f6bbdfa2afa0f1fb6712bb9ba0f1, UNASSIGN in 264 msec 2024-11-23T17:09:41,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742198_1374 (size=84) 2024-11-23T17:09:41,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742198_1374 (size=84) 2024-11-23T17:09:41,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742198_1374 (size=84) 2024-11-23T17:09:41,604 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:09:41,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742199_1375 (size=20) 2024-11-23T17:09:41,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742199_1375 (size=20) 2024-11-23T17:09:41,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742199_1375 (size=20) 2024-11-23T17:09:41,613 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:09:41,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742200_1376 (size=21) 2024-11-23T17:09:41,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742200_1376 (size=21) 2024-11-23T17:09:41,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742200_1376 (size=21) 2024-11-23T17:09:41,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742201_1377 (size=84) 2024-11-23T17:09:41,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742201_1377 (size=84) 2024-11-23T17:09:41,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742201_1377 (size=84) 2024-11-23T17:09:41,628 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:09:41,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-11-23T17:09:41,636 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/101983e8b6b249eafb04f77c0ff5026a/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1 2024-11-23T17:09:41,638 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780645.5558f6bbdfa2afa0f1fb6712bb9ba0f1.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-23T17:09:41,638 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1732381780645.f24d47972914c5327c02f79c10022d00.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-23T17:09:41,638 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Put {"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780646.101983e8b6b249eafb04f77c0ff5026a.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-23T17:09:41,642 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=101983e8b6b249eafb04f77c0ff5026a, ASSIGN}] 2024-11-23T17:09:41,643 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=101983e8b6b249eafb04f77c0ff5026a, ASSIGN 2024-11-23T17:09:41,643 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=101983e8b6b249eafb04f77c0ff5026a, ASSIGN; state=MERGED, location=1a7c5470b472,44711,1732381534872; forceNewPlan=false, retain=false 2024-11-23T17:09:41,794 INFO [1a7c5470b472:46159 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
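Once the ASSIGN below completes, the merged region 101983e8b6b249eafb04f77c0ff5026a is the only region serving the table. A small sketch of how a client could confirm that; only the table name comes from this log, the rest is illustrative.

    import java.util.List;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;

    public class CheckMergedRegionSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // After the merge there should be a single region covering the whole key space.
          List<RegionInfo> regions = admin.getRegions(table);
          System.out.println("regions=" + regions.size()
              + " first=" + (regions.isEmpty() ? "none" : regions.get(0).getEncodedName()));
        }
      }
    }
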
2024-11-23T17:09:41,794 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=101983e8b6b249eafb04f77c0ff5026a, regionState=OPENING, regionLocation=1a7c5470b472,44711,1732381534872 2024-11-23T17:09:41,796 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=101983e8b6b249eafb04f77c0ff5026a, ASSIGN because future has completed 2024-11-23T17:09:41,797 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE, hasLock=false; OpenRegionProcedure 101983e8b6b249eafb04f77c0ff5026a, server=1a7c5470b472,44711,1732381534872}] 2024-11-23T17:09:41,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-11-23T17:09:41,951 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780646.101983e8b6b249eafb04f77c0ff5026a. 2024-11-23T17:09:41,951 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7752): Opening region: {ENCODED => 101983e8b6b249eafb04f77c0ff5026a, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780646.101983e8b6b249eafb04f77c0ff5026a.', STARTKEY => '', ENDKEY => ''} 2024-11-23T17:09:41,952 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780646.101983e8b6b249eafb04f77c0ff5026a. service=AccessControlService 2024-11-23T17:09:41,952 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T17:09:41,952 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 101983e8b6b249eafb04f77c0ff5026a 2024-11-23T17:09:41,952 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780646.101983e8b6b249eafb04f77c0ff5026a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:09:41,952 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7794): checking encryption for 101983e8b6b249eafb04f77c0ff5026a 2024-11-23T17:09:41,952 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7797): checking classloading for 101983e8b6b249eafb04f77c0ff5026a 2024-11-23T17:09:41,953 INFO [StoreOpener-101983e8b6b249eafb04f77c0ff5026a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 101983e8b6b249eafb04f77c0ff5026a 2024-11-23T17:09:41,954 INFO [StoreOpener-101983e8b6b249eafb04f77c0ff5026a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 101983e8b6b249eafb04f77c0ff5026a columnFamilyName cf 2024-11-23T17:09:41,954 DEBUG [StoreOpener-101983e8b6b249eafb04f77c0ff5026a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:09:41,962 DEBUG [StoreOpener-101983e8b6b249eafb04f77c0ff5026a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/101983e8b6b249eafb04f77c0ff5026a/cf/69887fd8068246c4b1fe914aa21b8510.f24d47972914c5327c02f79c10022d00->hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f24d47972914c5327c02f79c10022d00/cf/69887fd8068246c4b1fe914aa21b8510-top 2024-11-23T17:09:41,966 DEBUG [StoreOpener-101983e8b6b249eafb04f77c0ff5026a-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/101983e8b6b249eafb04f77c0ff5026a/cf/90dbb359e9614fc4b58e7d28d9d868f6.5558f6bbdfa2afa0f1fb6712bb9ba0f1->hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5558f6bbdfa2afa0f1fb6712bb9ba0f1/cf/90dbb359e9614fc4b58e7d28d9d868f6-top 2024-11-23T17:09:41,966 INFO [StoreOpener-101983e8b6b249eafb04f77c0ff5026a-1 {}] regionserver.HStore(327): Store=101983e8b6b249eafb04f77c0ff5026a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:09:41,967 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1038): replaying wal for 101983e8b6b249eafb04f77c0ff5026a 2024-11-23T17:09:41,967 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/101983e8b6b249eafb04f77c0ff5026a 2024-11-23T17:09:41,968 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/101983e8b6b249eafb04f77c0ff5026a 2024-11-23T17:09:41,968 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1048): stopping wal replay for 101983e8b6b249eafb04f77c0ff5026a 2024-11-23T17:09:41,969 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1060): Cleaning up temporary data for 101983e8b6b249eafb04f77c0ff5026a 2024-11-23T17:09:41,970 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1093): writing seq id for 101983e8b6b249eafb04f77c0ff5026a 2024-11-23T17:09:41,971 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1114): Opened 101983e8b6b249eafb04f77c0ff5026a; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71295116, jitterRate=0.06238001585006714}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:09:41,971 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 101983e8b6b249eafb04f77c0ff5026a 2024-11-23T17:09:41,971 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1006): Region open journal for 101983e8b6b249eafb04f77c0ff5026a: Running coprocessor pre-open hook at 1732381781952Writing region info on filesystem at 1732381781952Initializing all the Stores at 1732381781953 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381781953Cleaning up temporary data from old regions at 1732381781969 (+16 ms)Running coprocessor post-open hooks at 1732381781971 (+2 ms)Region opened successfully at 1732381781971 2024-11-23T17:09:41,972 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780646.101983e8b6b249eafb04f77c0ff5026a., pid=154, masterSystemTime=1732381781949 2024-11-23T17:09:41,972 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780646.101983e8b6b249eafb04f77c0ff5026a.,because compaction is disabled. 2024-11-23T17:09:41,973 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780646.101983e8b6b249eafb04f77c0ff5026a. 2024-11-23T17:09:41,973 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780646.101983e8b6b249eafb04f77c0ff5026a. 2024-11-23T17:09:41,974 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=101983e8b6b249eafb04f77c0ff5026a, regionState=OPEN, openSeqNum=9, regionLocation=1a7c5470b472,44711,1732381534872 2024-11-23T17:09:41,975 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=154, ppid=153, state=RUNNABLE, hasLock=false; OpenRegionProcedure 101983e8b6b249eafb04f77c0ff5026a, server=1a7c5470b472,44711,1732381534872 because future has completed 2024-11-23T17:09:41,978 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=154, resume processing ppid=153 2024-11-23T17:09:41,978 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=154, ppid=153, state=SUCCESS, hasLock=false; OpenRegionProcedure 101983e8b6b249eafb04f77c0ff5026a, server=1a7c5470b472,44711,1732381534872 in 179 msec 2024-11-23T17:09:41,980 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=153, resume processing ppid=148 2024-11-23T17:09:41,980 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=153, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=101983e8b6b249eafb04f77c0ff5026a, ASSIGN in 336 msec 2024-11-23T17:09:41,981 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=148, state=SUCCESS, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[5558f6bbdfa2afa0f1fb6712bb9ba0f1, f24d47972914c5327c02f79c10022d00], force=true in 674 msec 2024-11-23T17:09:42,226 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_1/usercache/jenkins/appcache/application_1732381541272_0006/container_1732381541272_0006_01_000003/launch_container.sh] 2024-11-23T17:09:42,226 
WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_1/usercache/jenkins/appcache/application_1732381541272_0006/container_1732381541272_0006_01_000003/container_tokens] 2024-11-23T17:09:42,226 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_1/usercache/jenkins/appcache/application_1732381541272_0006/container_1732381541272_0006_01_000003/sysfs] 2024-11-23T17:09:42,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-11-23T17:09:42,448 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-23T17:09:42,448 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-11-23T17:09:42,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732381782448 (current time:1732381782448). 2024-11-23T17:09:42,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:09:42,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2 2024-11-23T17:09:42,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:09:42,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d90607a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:09:42,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:09:42,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:09:42,450 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:09:42,450 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:09:42,450 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection 
registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:09:42,451 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b3b4a18, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:09:42,451 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:09:42,451 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:09:42,451 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:09:42,452 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42958, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:09:42,452 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42e2478e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:09:42,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:09:42,453 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:09:42,453 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:09:42,454 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41306, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:09:42,455 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159. 
2024-11-23T17:09:42,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:09:42,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:09:42,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:09:42,455 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:09:42,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@586444e6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:09:42,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:09:42,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:09:42,456 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:09:42,457 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:09:42,457 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:09:42,457 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2928ea12, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:09:42,457 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:09:42,457 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:09:42,457 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:09:42,458 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42962, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:09:42,458 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63a5736d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:09:42,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:09:42,459 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:09:42,459 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:09:42,460 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41322, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:09:42,461 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion-1', locateType=CURRENT is [region=hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce., hostname=1a7c5470b472,37875,1732381534675, seqNum=2] 2024-11-23T17:09:42,462 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159. 
2024-11-23T17:09:42,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor224.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:09:42,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:09:42,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:09:42,462 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:09:42,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-11-23T17:09:42,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
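The snapshot being validated here is a FLUSH-type snapshot named snaptb0-testExportFileSystemStateWithMergeRegion-1 of the merged table. The client call that produces such a request is roughly the following sketch; the snapshot and table names come from this log, everything else is illustrative.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // A flush-based snapshot of the online table; blocks until the snapshot completes.
          admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1",
              TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"));
        }
      }
    }
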
2024-11-23T17:09:42,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=155, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-11-23T17:09:42,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 155 2024-11-23T17:09:42,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-11-23T17:09:42,465 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:09:42,465 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:09:42,467 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:09:42,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742202_1378 (size=216) 2024-11-23T17:09:42,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742202_1378 (size=216) 2024-11-23T17:09:42,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742202_1378 (size=216) 2024-11-23T17:09:42,473 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:09:42,474 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 101983e8b6b249eafb04f77c0ff5026a}] 2024-11-23T17:09:42,474 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 101983e8b6b249eafb04f77c0ff5026a 2024-11-23T17:09:42,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 
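Once the SnapshotProcedure registered above finishes (further down in this log), the snapshot is exported to an HDFS destination with the ExportSnapshot tool. The sketch below drives that export programmatically under the assumption that the class is run via ToolRunner; the destination path is hypothetical, and the equivalent command line is hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot <name> -copy-to <dest>.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // -snapshot and -copy-to are standard ExportSnapshot options; the destination is hypothetical.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
            "-copy-to", "hdfs://namenode:8020/hbase-backup"
        });
        System.exit(rc);
      }
    }
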
2024-11-23T17:09:42,626 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44711 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=156 2024-11-23T17:09:42,626 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780646.101983e8b6b249eafb04f77c0ff5026a. 2024-11-23T17:09:42,627 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.HRegion(2603): Flush status journal for 101983e8b6b249eafb04f77c0ff5026a: 2024-11-23T17:09:42,627 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780646.101983e8b6b249eafb04f77c0ff5026a. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed. 2024-11-23T17:09:42,627 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780646.101983e8b6b249eafb04f77c0ff5026a.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:09:42,627 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:09:42,627 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/101983e8b6b249eafb04f77c0ff5026a/cf/69887fd8068246c4b1fe914aa21b8510.f24d47972914c5327c02f79c10022d00->hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f24d47972914c5327c02f79c10022d00/cf/69887fd8068246c4b1fe914aa21b8510-top, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/101983e8b6b249eafb04f77c0ff5026a/cf/90dbb359e9614fc4b58e7d28d9d868f6.5558f6bbdfa2afa0f1fb6712bb9ba0f1->hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5558f6bbdfa2afa0f1fb6712bb9ba0f1/cf/90dbb359e9614fc4b58e7d28d9d868f6-top] hfiles 2024-11-23T17:09:42,627 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/101983e8b6b249eafb04f77c0ff5026a/cf/69887fd8068246c4b1fe914aa21b8510.f24d47972914c5327c02f79c10022d00 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:09:42,628 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/101983e8b6b249eafb04f77c0ff5026a/cf/90dbb359e9614fc4b58e7d28d9d868f6.5558f6bbdfa2afa0f1fb6712bb9ba0f1 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:09:42,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742203_1379 (size=269) 2024-11-23T17:09:42,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742203_1379 (size=269) 2024-11-23T17:09:42,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742203_1379 (size=269) 2024-11-23T17:09:42,635 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780646.101983e8b6b249eafb04f77c0ff5026a. 2024-11-23T17:09:42,635 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=156 2024-11-23T17:09:42,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=156 2024-11-23T17:09:42,636 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region 101983e8b6b249eafb04f77c0ff5026a 2024-11-23T17:09:42,636 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 101983e8b6b249eafb04f77c0ff5026a 2024-11-23T17:09:42,639 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=156, resume processing ppid=155 2024-11-23T17:09:42,639 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=156, ppid=155, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 101983e8b6b249eafb04f77c0ff5026a in 163 msec 2024-11-23T17:09:42,639 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:09:42,640 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:09:42,641 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:09:42,641 DEBUG [PEWorker-4 {}] 
snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:09:42,642 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:09:42,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742204_1380 (size=670) 2024-11-23T17:09:42,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742204_1380 (size=670) 2024-11-23T17:09:42,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742204_1380 (size=670) 2024-11-23T17:09:42,654 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:09:42,659 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:09:42,660 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:09:42,661 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:09:42,661 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 155 2024-11-23T17:09:42,662 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=155, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 198 msec 2024-11-23T17:09:42,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-11-23T17:09:42,778 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-23T17:09:42,778 
INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381782778 2024-11-23T17:09:42,778 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:35431, tgtDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381782778, rawTgtDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381782778, srcFsUri=hdfs://localhost:35431, srcDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:09:42,807 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:35431, inputRoot=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:09:42,807 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1293158403_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381782778, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381782778/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:09:42,808 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-23T17:09:42,813 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381782778/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:09:42,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742206_1382 (size=670) 2024-11-23T17:09:42,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742205_1381 (size=216) 2024-11-23T17:09:42,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742206_1382 (size=670) 2024-11-23T17:09:42,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742205_1381 (size=216) 2024-11-23T17:09:42,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742205_1381 (size=216) 2024-11-23T17:09:42,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742206_1382 (size=670) 2024-11-23T17:09:42,825 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:09:42,825 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:09:42,825 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:09:43,431 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0006_000001 (auth:SIMPLE) from 127.0.0.1:41742 2024-11-23T17:09:43,443 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_3/usercache/jenkins/appcache/application_1732381541272_0006/container_1732381541272_0006_01_000001/launch_container.sh] 2024-11-23T17:09:43,443 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_3/usercache/jenkins/appcache/application_1732381541272_0006/container_1732381541272_0006_01_000001/container_tokens] 2024-11-23T17:09:43,443 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_3/usercache/jenkins/appcache/application_1732381541272_0006/container_1732381541272_0006_01_000001/sysfs] 2024-11-23T17:09:43,836 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop-13473414288662787288.jar 2024-11-23T17:09:43,836 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:09:43,836 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:09:43,912 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop-18099350371954503805.jar 2024-11-23T17:09:43,912 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:09:43,913 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:09:43,913 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:09:43,913 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:09:43,914 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:09:43,914 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:09:43,914 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-23T17:09:43,915 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-23T17:09:43,915 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-23T17:09:43,915 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-23T17:09:43,916 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-23T17:09:43,916 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-23T17:09:43,916 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-23T17:09:43,916 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-23T17:09:43,917 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-23T17:09:43,917 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-23T17:09:43,917 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-23T17:09:43,918 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:09:43,918 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:09:43,918 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:09:43,918 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:09:43,919 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:09:43,919 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:09:43,919 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:09:43,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742207_1383 (size=24020) 2024-11-23T17:09:43,992 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742207_1383 (size=24020) 2024-11-23T17:09:43,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742207_1383 (size=24020) 2024-11-23T17:09:44,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742208_1384 (size=77755) 2024-11-23T17:09:44,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742208_1384 (size=77755) 2024-11-23T17:09:44,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742208_1384 (size=77755) 2024-11-23T17:09:44,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742209_1385 (size=131360) 2024-11-23T17:09:44,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742209_1385 (size=131360) 2024-11-23T17:09:44,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742209_1385 (size=131360) 2024-11-23T17:09:44,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742210_1386 (size=111793) 2024-11-23T17:09:44,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742210_1386 (size=111793) 2024-11-23T17:09:44,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742210_1386 (size=111793) 2024-11-23T17:09:44,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742211_1387 (size=1832290) 2024-11-23T17:09:44,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742211_1387 (size=1832290) 2024-11-23T17:09:44,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742211_1387 (size=1832290) 2024-11-23T17:09:44,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742212_1388 (size=8360005) 2024-11-23T17:09:44,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742212_1388 (size=8360005) 2024-11-23T17:09:44,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742212_1388 (size=8360005) 2024-11-23T17:09:44,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742213_1389 (size=503880) 2024-11-23T17:09:44,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742213_1389 (size=503880) 2024-11-23T17:09:44,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742213_1389 (size=503880) 2024-11-23T17:09:44,128 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742214_1390 (size=322274) 2024-11-23T17:09:44,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742214_1390 (size=322274) 2024-11-23T17:09:44,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742214_1390 (size=322274) 2024-11-23T17:09:44,144 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:09:44,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742215_1391 (size=20406) 2024-11-23T17:09:44,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742215_1391 (size=20406) 2024-11-23T17:09:44,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742215_1391 (size=20406) 2024-11-23T17:09:44,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742216_1392 (size=45609) 2024-11-23T17:09:44,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742216_1392 (size=45609) 2024-11-23T17:09:44,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742216_1392 (size=45609) 2024-11-23T17:09:44,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742217_1393 (size=136454) 2024-11-23T17:09:44,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742217_1393 (size=136454) 2024-11-23T17:09:44,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742217_1393 (size=136454) 2024-11-23T17:09:44,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742218_1394 (size=1597270) 2024-11-23T17:09:44,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742218_1394 (size=1597270) 2024-11-23T17:09:44,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742218_1394 (size=1597270) 2024-11-23T17:09:44,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742219_1395 (size=30873) 2024-11-23T17:09:44,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742219_1395 (size=30873) 2024-11-23T17:09:44,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742219_1395 (size=30873) 2024-11-23T17:09:44,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742220_1396 (size=29229) 2024-11-23T17:09:44,214 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742220_1396 (size=29229) 2024-11-23T17:09:44,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742220_1396 (size=29229) 2024-11-23T17:09:44,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742221_1397 (size=903661) 2024-11-23T17:09:44,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742221_1397 (size=903661) 2024-11-23T17:09:44,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742221_1397 (size=903661) 2024-11-23T17:09:44,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742222_1398 (size=6424742) 2024-11-23T17:09:44,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742222_1398 (size=6424742) 2024-11-23T17:09:44,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742222_1398 (size=6424742) 2024-11-23T17:09:44,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742223_1399 (size=440956) 2024-11-23T17:09:44,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742223_1399 (size=440956) 2024-11-23T17:09:44,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742223_1399 (size=440956) 2024-11-23T17:09:44,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742224_1400 (size=5175431) 2024-11-23T17:09:44,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742224_1400 (size=5175431) 2024-11-23T17:09:44,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742224_1400 (size=5175431) 2024-11-23T17:09:44,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742225_1401 (size=232881) 2024-11-23T17:09:44,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742225_1401 (size=232881) 2024-11-23T17:09:44,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742225_1401 (size=232881) 2024-11-23T17:09:44,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742226_1402 (size=1323991) 2024-11-23T17:09:44,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742226_1402 (size=1323991) 2024-11-23T17:09:44,340 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: 
RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:09:44,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742226_1402 (size=1323991) 2024-11-23T17:09:44,340 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-11-23T17:09:44,341 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:09:44,341 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-11-23T17:09:44,341 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-11-23T17:09:44,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742227_1403 (size=4695811) 2024-11-23T17:09:44,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742227_1403 (size=4695811) 2024-11-23T17:09:44,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742227_1403 (size=4695811) 2024-11-23T17:09:44,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742228_1404 (size=1877034) 2024-11-23T17:09:44,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742228_1404 (size=1877034) 2024-11-23T17:09:44,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742228_1404 (size=1877034) 2024-11-23T17:09:44,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742229_1405 (size=217555) 2024-11-23T17:09:44,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742229_1405 (size=217555) 2024-11-23T17:09:44,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742229_1405 (size=217555) 2024-11-23T17:09:44,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742230_1406 (size=4188619) 2024-11-23T17:09:44,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742230_1406 (size=4188619) 2024-11-23T17:09:44,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742230_1406 (size=4188619) 2024-11-23T17:09:44,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to 
blk_1073742231_1407 (size=127628) 2024-11-23T17:09:44,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742231_1407 (size=127628) 2024-11-23T17:09:44,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742231_1407 (size=127628) 2024-11-23T17:09:44,452 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-23T17:09:44,455 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-11-23T17:09:44,457 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=4.8 K 2024-11-23T17:09:44,457 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=4.8 K 2024-11-23T17:09:44,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742232_1408 (size=481) 2024-11-23T17:09:44,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742232_1408 (size=481) 2024-11-23T17:09:44,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742232_1408 (size=481) 2024-11-23T17:09:44,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742233_1409 (size=21) 2024-11-23T17:09:44,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742233_1409 (size=21) 2024-11-23T17:09:44,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742233_1409 (size=21) 2024-11-23T17:09:44,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742234_1410 (size=304138) 2024-11-23T17:09:44,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742234_1410 (size=304138) 2024-11-23T17:09:44,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742234_1410 (size=304138) 2024-11-23T17:09:44,523 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T17:09:44,523 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-23T17:09:45,214 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0007_000001 (auth:SIMPLE) from 127.0.0.1:56878 2024-11-23T17:09:49,844 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:09:51,485 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0007_000001 (auth:SIMPLE) from 127.0.0.1:53720 2024-11-23T17:09:51,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742235_1411 (size=349836) 2024-11-23T17:09:51,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742235_1411 (size=349836) 2024-11-23T17:09:51,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742235_1411 (size=349836) 2024-11-23T17:09:53,716 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0007_000001 (auth:SIMPLE) from 127.0.0.1:52464 2024-11-23T17:09:53,716 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0007_000001 (auth:SIMPLE) from 127.0.0.1:46860 2024-11-23T17:09:57,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742236_1412 (size=4945) 2024-11-23T17:09:57,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742236_1412 (size=4945) 2024-11-23T17:09:57,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742236_1412 (size=4945) 2024-11-23T17:09:57,233 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_3/usercache/jenkins/appcache/application_1732381541272_0007/container_1732381541272_0007_01_000002/launch_container.sh] 2024-11-23T17:09:57,233 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_3/usercache/jenkins/appcache/application_1732381541272_0007/container_1732381541272_0007_01_000002/container_tokens] 2024-11-23T17:09:57,233 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_3/usercache/jenkins/appcache/application_1732381541272_0007/container_1732381541272_0007_01_000002/sysfs] 2024-11-23T17:09:57,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742238_1414 (size=4945) 2024-11-23T17:09:57,656 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742238_1414 (size=4945) 2024-11-23T17:09:57,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742238_1414 (size=4945) 2024-11-23T17:09:57,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742237_1413 (size=22243) 2024-11-23T17:09:57,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742237_1413 (size=22243) 2024-11-23T17:09:57,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742237_1413 (size=22243) 2024-11-23T17:09:57,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742239_1415 (size=482) 2024-11-23T17:09:57,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742239_1415 (size=482) 2024-11-23T17:09:57,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742239_1415 (size=482) 2024-11-23T17:09:57,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742240_1416 (size=22243) 2024-11-23T17:09:57,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742240_1416 (size=22243) 2024-11-23T17:09:57,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742240_1416 (size=22243) 2024-11-23T17:09:57,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742241_1417 (size=349836) 2024-11-23T17:09:57,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742241_1417 (size=349836) 2024-11-23T17:09:57,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742241_1417 (size=349836) 2024-11-23T17:09:57,791 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_0/usercache/jenkins/appcache/application_1732381541272_0007/container_1732381541272_0007_01_000003/launch_container.sh] 2024-11-23T17:09:57,791 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_0/usercache/jenkins/appcache/application_1732381541272_0007/container_1732381541272_0007_01_000003/container_tokens] 2024-11-23T17:09:57,791 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_0/usercache/jenkins/appcache/application_1732381541272_0007/container_1732381541272_0007_01_000003/sysfs] 2024-11-23T17:09:57,792 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0007_000001 (auth:SIMPLE) from 127.0.0.1:52480 2024-11-23T17:09:59,710 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-23T17:09:59,711 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-11-23T17:09:59,717 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:09:59,717 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-23T17:09:59,717 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-23T17:09:59,717 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1293158403_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:09:59,718 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-11-23T17:09:59,718 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-11-23T17:09:59,718 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1293158403_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381782778/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381782778/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:09:59,718 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381782778/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-11-23T17:09:59,718 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381782778/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-11-23T17:09:59,723 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:09:59,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] 
procedure2.ProcedureExecutor(1139): Stored pid=157, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:09:59,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-23T17:09:59,726 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381799726"}]},"ts":"1732381799726"} 2024-11-23T17:09:59,728 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta 2024-11-23T17:09:59,728 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-11-23T17:09:59,729 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-11-23T17:09:59,730 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=101983e8b6b249eafb04f77c0ff5026a, UNASSIGN}] 2024-11-23T17:09:59,731 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=101983e8b6b249eafb04f77c0ff5026a, UNASSIGN 2024-11-23T17:09:59,731 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=101983e8b6b249eafb04f77c0ff5026a, regionState=CLOSING, regionLocation=1a7c5470b472,44711,1732381534872 2024-11-23T17:09:59,733 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=101983e8b6b249eafb04f77c0ff5026a, UNASSIGN because future has completed 2024-11-23T17:09:59,733 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:09:59,733 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE, hasLock=false; CloseRegionProcedure 101983e8b6b249eafb04f77c0ff5026a, server=1a7c5470b472,44711,1732381534872}] 2024-11-23T17:09:59,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-23T17:09:59,885 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(122): Close 101983e8b6b249eafb04f77c0ff5026a 2024-11-23T17:09:59,885 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:09:59,885 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 
{event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1722): Closing 101983e8b6b249eafb04f77c0ff5026a, disabling compactions & flushes 2024-11-23T17:09:59,885 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780646.101983e8b6b249eafb04f77c0ff5026a. 2024-11-23T17:09:59,885 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780646.101983e8b6b249eafb04f77c0ff5026a. 2024-11-23T17:09:59,885 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780646.101983e8b6b249eafb04f77c0ff5026a. after waiting 0 ms 2024-11-23T17:09:59,885 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780646.101983e8b6b249eafb04f77c0ff5026a. 2024-11-23T17:09:59,890 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/101983e8b6b249eafb04f77c0ff5026a/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-11-23T17:09:59,890 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:09:59,890 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780646.101983e8b6b249eafb04f77c0ff5026a. 
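
The run recorded above — the FLUSH snapshot procedure (pid=155) on testtb-testExportFileSystemStateWithMergeRegion-1, the ExportSnapshot MapReduce job whose dependency jars and containers fill the block reports, and the "Export Completed" verification at 17:09:59 — corresponds roughly to the client-side sequence sketched below. This is a minimal sketch, not the test's own code: the Configuration/Connection wiring and the main class are illustrative assumptions, and only the snapshot, table, and destination names are copied from the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class SnapshotExportSketch {
  public static void main(String[] args) throws Exception {
    // Assumption: hbase-site.xml on the classpath points at the test cluster.
    Configuration conf = HBaseConfiguration.create();

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Take the FLUSH-type snapshot that appears as SnapshotProcedure pid=155 in the master log.
      admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"));
    }

    // Export the snapshot to the destination directory from the log; ExportSnapshot
    // launches the MapReduce job whose containers and staged jars appear above.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
        "-copy-to", "hdfs://localhost:35431/user/jenkins/test-data/"
            + "7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381782778"
    });
    if (rc != 0) {
      throw new IllegalStateException("ExportSnapshot failed with exit code " + rc);
    }
  }
}
```

Admin.snapshot() blocks until the master-side procedure finishes, which matches the "Checking to see if procedure is done pid=155" polling followed by "Operation: SNAPSHOT ... completed" before the export begins.
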
2024-11-23T17:09:59,890 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1676): Region close journal for 101983e8b6b249eafb04f77c0ff5026a: Waiting for close lock at 1732381799885Running coprocessor pre-close hooks at 1732381799885Disabling compacts and flushes for region at 1732381799885Disabling writes for close at 1732381799885Writing region close event to WAL at 1732381799886 (+1 ms)Running coprocessor post-close hooks at 1732381799890 (+4 ms)Closed at 1732381799890 2024-11-23T17:09:59,892 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(157): Closed 101983e8b6b249eafb04f77c0ff5026a 2024-11-23T17:09:59,893 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=101983e8b6b249eafb04f77c0ff5026a, regionState=CLOSED 2024-11-23T17:09:59,894 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=160, ppid=159, state=RUNNABLE, hasLock=false; CloseRegionProcedure 101983e8b6b249eafb04f77c0ff5026a, server=1a7c5470b472,44711,1732381534872 because future has completed 2024-11-23T17:09:59,896 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=160, resume processing ppid=159 2024-11-23T17:09:59,896 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=160, ppid=159, state=SUCCESS, hasLock=false; CloseRegionProcedure 101983e8b6b249eafb04f77c0ff5026a, server=1a7c5470b472,44711,1732381534872 in 162 msec 2024-11-23T17:09:59,898 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=159, resume processing ppid=158 2024-11-23T17:09:59,898 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=159, ppid=158, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=101983e8b6b249eafb04f77c0ff5026a, UNASSIGN in 166 msec 2024-11-23T17:09:59,900 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=158, resume processing ppid=157 2024-11-23T17:09:59,900 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=158, ppid=157, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 169 msec 2024-11-23T17:09:59,901 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381799901"}]},"ts":"1732381799901"} 2024-11-23T17:09:59,903 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in hbase:meta 2024-11-23T17:09:59,903 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-11-23T17:09:59,904 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=157, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 180 msec 2024-11-23T17:10:00,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-23T17:10:00,048 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: 
default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-23T17:10:00,049 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:10:00,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:10:00,050 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:10:00,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:10:00,051 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=161, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:10:00,053 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37875 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:10:00,054 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/101983e8b6b249eafb04f77c0ff5026a 2024-11-23T17:10:00,054 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5558f6bbdfa2afa0f1fb6712bb9ba0f1 2024-11-23T17:10:00,054 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f24d47972914c5327c02f79c10022d00 2024-11-23T17:10:00,055 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f24d47972914c5327c02f79c10022d00/cf, FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f24d47972914c5327c02f79c10022d00/recovered.edits] 2024-11-23T17:10:00,055 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/101983e8b6b249eafb04f77c0ff5026a/cf, FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/101983e8b6b249eafb04f77c0ff5026a/recovered.edits] 2024-11-23T17:10:00,056 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5558f6bbdfa2afa0f1fb6712bb9ba0f1/cf, FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5558f6bbdfa2afa0f1fb6712bb9ba0f1/recovered.edits] 2024-11-23T17:10:00,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:10:00,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:10:00,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:10:00,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:10:00,058 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-23T17:10:00,058 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-23T17:10:00,058 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-23T17:10:00,058 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-23T17:10:00,059 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f24d47972914c5327c02f79c10022d00/cf/69887fd8068246c4b1fe914aa21b8510 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f24d47972914c5327c02f79c10022d00/cf/69887fd8068246c4b1fe914aa21b8510 2024-11-23T17:10:00,059 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/101983e8b6b249eafb04f77c0ff5026a/cf/69887fd8068246c4b1fe914aa21b8510.f24d47972914c5327c02f79c10022d00 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/101983e8b6b249eafb04f77c0ff5026a/cf/69887fd8068246c4b1fe914aa21b8510.f24d47972914c5327c02f79c10022d00 
2024-11-23T17:10:00,060 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5558f6bbdfa2afa0f1fb6712bb9ba0f1/cf/90dbb359e9614fc4b58e7d28d9d868f6 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5558f6bbdfa2afa0f1fb6712bb9ba0f1/cf/90dbb359e9614fc4b58e7d28d9d868f6 2024-11-23T17:10:00,061 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/101983e8b6b249eafb04f77c0ff5026a/cf/90dbb359e9614fc4b58e7d28d9d868f6.5558f6bbdfa2afa0f1fb6712bb9ba0f1 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/101983e8b6b249eafb04f77c0ff5026a/cf/90dbb359e9614fc4b58e7d28d9d868f6.5558f6bbdfa2afa0f1fb6712bb9ba0f1 2024-11-23T17:10:00,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:10:00,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:10:00,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:10:00,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:10:00,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:10:00,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:10:00,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:10:00,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:10:00,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done 
pid=161 2024-11-23T17:10:00,063 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:00,063 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:00,063 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:00,063 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:00,063 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f24d47972914c5327c02f79c10022d00/recovered.edits/8.seqid to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f24d47972914c5327c02f79c10022d00/recovered.edits/8.seqid 2024-11-23T17:10:00,064 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5558f6bbdfa2afa0f1fb6712bb9ba0f1/recovered.edits/8.seqid to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5558f6bbdfa2afa0f1fb6712bb9ba0f1/recovered.edits/8.seqid 2024-11-23T17:10:00,064 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/f24d47972914c5327c02f79c10022d00 2024-11-23T17:10:00,064 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/5558f6bbdfa2afa0f1fb6712bb9ba0f1 2024-11-23T17:10:00,065 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/101983e8b6b249eafb04f77c0ff5026a/recovered.edits/12.seqid to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/101983e8b6b249eafb04f77c0ff5026a/recovered.edits/12.seqid 2024-11-23T17:10:00,065 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/101983e8b6b249eafb04f77c0ff5026a 2024-11-23T17:10:00,065 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-11-23T17:10:00,067 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=161, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:10:00,069 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 2024-11-23T17:10:00,071 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' descriptor. 2024-11-23T17:10:00,072 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=161, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:10:00,072 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 2024-11-23T17:10:00,072 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780646.101983e8b6b249eafb04f77c0ff5026a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732381800072"}]},"ts":"9223372036854775807"} 2024-11-23T17:10:00,073 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-11-23T17:10:00,073 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 101983e8b6b249eafb04f77c0ff5026a, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780646.101983e8b6b249eafb04f77c0ff5026a.', STARTKEY => '', ENDKEY => ''}] 2024-11-23T17:10:00,073 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 
2024-11-23T17:10:00,073 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732381800073"}]},"ts":"9223372036854775807"} 2024-11-23T17:10:00,075 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-11-23T17:10:00,075 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=161, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:10:00,077 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=161, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 26 msec 2024-11-23T17:10:00,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=161 2024-11-23T17:10:00,168 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:10:00,168 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-23T17:10:00,169 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:10:00,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=162, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:10:00,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-23T17:10:00,172 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381800172"}]},"ts":"1732381800172"} 2024-11-23T17:10:00,173 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-11-23T17:10:00,173 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 2024-11-23T17:10:00,174 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-11-23T17:10:00,176 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=c6d43d02a62db34e972bce100a1c993f, UNASSIGN}, {pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=19dbb7b062c887aed1dc9076d6db561a, UNASSIGN}] 2024-11-23T17:10:00,177 INFO [PEWorker-4 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=c6d43d02a62db34e972bce100a1c993f, UNASSIGN 2024-11-23T17:10:00,177 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=19dbb7b062c887aed1dc9076d6db561a, UNASSIGN 2024-11-23T17:10:00,177 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=165 updating hbase:meta row=19dbb7b062c887aed1dc9076d6db561a, regionState=CLOSING, regionLocation=1a7c5470b472,44711,1732381534872 2024-11-23T17:10:00,177 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=164 updating hbase:meta row=c6d43d02a62db34e972bce100a1c993f, regionState=CLOSING, regionLocation=1a7c5470b472,43453,1732381534825 2024-11-23T17:10:00,179 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=c6d43d02a62db34e972bce100a1c993f, UNASSIGN because future has completed 2024-11-23T17:10:00,179 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:10:00,179 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=166, ppid=164, state=RUNNABLE, hasLock=false; CloseRegionProcedure c6d43d02a62db34e972bce100a1c993f, server=1a7c5470b472,43453,1732381534825}] 2024-11-23T17:10:00,179 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=19dbb7b062c887aed1dc9076d6db561a, UNASSIGN because future has completed 2024-11-23T17:10:00,180 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:10:00,180 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=167, ppid=165, state=RUNNABLE, hasLock=false; CloseRegionProcedure 19dbb7b062c887aed1dc9076d6db561a, server=1a7c5470b472,44711,1732381534872}] 2024-11-23T17:10:00,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-23T17:10:00,331 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(122): Close c6d43d02a62db34e972bce100a1c993f 2024-11-23T17:10:00,331 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:10:00,331 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1722): Closing c6d43d02a62db34e972bce100a1c993f, disabling compactions & flushes 2024-11-23T17:10:00,331 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1755): 
Closing region testtb-testExportFileSystemStateWithMergeRegion,,1732381778950.c6d43d02a62db34e972bce100a1c993f. 2024-11-23T17:10:00,331 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1732381778950.c6d43d02a62db34e972bce100a1c993f. 2024-11-23T17:10:00,331 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1732381778950.c6d43d02a62db34e972bce100a1c993f. after waiting 0 ms 2024-11-23T17:10:00,331 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1732381778950.c6d43d02a62db34e972bce100a1c993f. 2024-11-23T17:10:00,332 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(122): Close 19dbb7b062c887aed1dc9076d6db561a 2024-11-23T17:10:00,332 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:10:00,332 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1722): Closing 19dbb7b062c887aed1dc9076d6db561a, disabling compactions & flushes 2024-11-23T17:10:00,332 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1732381778950.19dbb7b062c887aed1dc9076d6db561a. 2024-11-23T17:10:00,332 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1732381778950.19dbb7b062c887aed1dc9076d6db561a. 2024-11-23T17:10:00,332 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1732381778950.19dbb7b062c887aed1dc9076d6db561a. after waiting 0 ms 2024-11-23T17:10:00,332 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1732381778950.19dbb7b062c887aed1dc9076d6db561a. 
2024-11-23T17:10:00,336 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/19dbb7b062c887aed1dc9076d6db561a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:10:00,336 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/c6d43d02a62db34e972bce100a1c993f/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:10:00,336 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:10:00,336 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:10:00,336 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1732381778950.19dbb7b062c887aed1dc9076d6db561a. 2024-11-23T17:10:00,336 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1676): Region close journal for 19dbb7b062c887aed1dc9076d6db561a: Waiting for close lock at 1732381800332Running coprocessor pre-close hooks at 1732381800332Disabling compacts and flushes for region at 1732381800332Disabling writes for close at 1732381800332Writing region close event to WAL at 1732381800333 (+1 ms)Running coprocessor post-close hooks at 1732381800336 (+3 ms)Closed at 1732381800336 2024-11-23T17:10:00,336 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1732381778950.c6d43d02a62db34e972bce100a1c993f. 
2024-11-23T17:10:00,336 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1676): Region close journal for c6d43d02a62db34e972bce100a1c993f: Waiting for close lock at 1732381800331Running coprocessor pre-close hooks at 1732381800331Disabling compacts and flushes for region at 1732381800331Disabling writes for close at 1732381800331Writing region close event to WAL at 1732381800332 (+1 ms)Running coprocessor post-close hooks at 1732381800336 (+4 ms)Closed at 1732381800336 2024-11-23T17:10:00,338 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(157): Closed 19dbb7b062c887aed1dc9076d6db561a 2024-11-23T17:10:00,338 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=165 updating hbase:meta row=19dbb7b062c887aed1dc9076d6db561a, regionState=CLOSED 2024-11-23T17:10:00,339 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(157): Closed c6d43d02a62db34e972bce100a1c993f 2024-11-23T17:10:00,339 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=164 updating hbase:meta row=c6d43d02a62db34e972bce100a1c993f, regionState=CLOSED 2024-11-23T17:10:00,340 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=167, ppid=165, state=RUNNABLE, hasLock=false; CloseRegionProcedure 19dbb7b062c887aed1dc9076d6db561a, server=1a7c5470b472,44711,1732381534872 because future has completed 2024-11-23T17:10:00,341 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=166, ppid=164, state=RUNNABLE, hasLock=false; CloseRegionProcedure c6d43d02a62db34e972bce100a1c993f, server=1a7c5470b472,43453,1732381534825 because future has completed 2024-11-23T17:10:00,342 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=167, resume processing ppid=165 2024-11-23T17:10:00,342 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=167, ppid=165, state=SUCCESS, hasLock=false; CloseRegionProcedure 19dbb7b062c887aed1dc9076d6db561a, server=1a7c5470b472,44711,1732381534872 in 161 msec 2024-11-23T17:10:00,343 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=166, resume processing ppid=164 2024-11-23T17:10:00,343 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=165, ppid=163, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=19dbb7b062c887aed1dc9076d6db561a, UNASSIGN in 166 msec 2024-11-23T17:10:00,343 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=166, ppid=164, state=SUCCESS, hasLock=false; CloseRegionProcedure c6d43d02a62db34e972bce100a1c993f, server=1a7c5470b472,43453,1732381534825 in 163 msec 2024-11-23T17:10:00,345 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=164, resume processing ppid=163 2024-11-23T17:10:00,345 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=164, ppid=163, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=c6d43d02a62db34e972bce100a1c993f, UNASSIGN in 167 msec 2024-11-23T17:10:00,347 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=163, resume processing ppid=162 2024-11-23T17:10:00,347 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=163, ppid=162, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 171 msec 2024-11-23T17:10:00,348 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381800347"}]},"ts":"1732381800347"} 2024-11-23T17:10:00,349 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 2024-11-23T17:10:00,349 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-11-23T17:10:00,350 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=162, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 180 msec 2024-11-23T17:10:00,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-23T17:10:00,488 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-23T17:10:00,488 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:10:00,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=168, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:10:00,490 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=168, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:10:00,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:10:00,491 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=168, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:10:00,493 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37875 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:10:00,494 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/c6d43d02a62db34e972bce100a1c993f 2024-11-23T17:10:00,494 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/19dbb7b062c887aed1dc9076d6db561a 2024-11-23T17:10:00,496 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/19dbb7b062c887aed1dc9076d6db561a/cf, FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/19dbb7b062c887aed1dc9076d6db561a/recovered.edits] 2024-11-23T17:10:00,496 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/c6d43d02a62db34e972bce100a1c993f/cf, FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/c6d43d02a62db34e972bce100a1c993f/recovered.edits] 2024-11-23T17:10:00,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:10:00,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:10:00,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:10:00,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:10:00,497 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-23T17:10:00,497 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-23T17:10:00,497 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-23T17:10:00,497 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-23T17:10:00,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:10:00,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:10:00,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:10:00,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:10:00,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:10:00,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:10:00,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:10:00,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:10:00,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-11-23T17:10:00,500 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/c6d43d02a62db34e972bce100a1c993f/cf/a9d4bcb30b774928a8734eca1d1b57cd to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/c6d43d02a62db34e972bce100a1c993f/cf/a9d4bcb30b774928a8734eca1d1b57cd 2024-11-23T17:10:00,501 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/19dbb7b062c887aed1dc9076d6db561a/cf/6e0925d7c7c244d38b12013dd4ff0c6a to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/19dbb7b062c887aed1dc9076d6db561a/cf/6e0925d7c7c244d38b12013dd4ff0c6a 2024-11-23T17:10:00,503 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/c6d43d02a62db34e972bce100a1c993f/recovered.edits/9.seqid to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/c6d43d02a62db34e972bce100a1c993f/recovered.edits/9.seqid 2024-11-23T17:10:00,503 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/19dbb7b062c887aed1dc9076d6db561a/recovered.edits/9.seqid to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/19dbb7b062c887aed1dc9076d6db561a/recovered.edits/9.seqid 2024-11-23T17:10:00,504 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/c6d43d02a62db34e972bce100a1c993f 2024-11-23T17:10:00,504 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithMergeRegion/19dbb7b062c887aed1dc9076d6db561a 2024-11-23T17:10:00,504 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-11-23T17:10:00,504 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c 2024-11-23T17:10:00,505 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf] 2024-11-23T17:10:00,508 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241123760edd42619a4271820cedd70bcc7b40_19dbb7b062c887aed1dc9076d6db561a to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241123760edd42619a4271820cedd70bcc7b40_19dbb7b062c887aed1dc9076d6db561a 2024-11-23T17:10:00,508 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241123a2be3c3c86b94da2bd039be84d7f45a5_c6d43d02a62db34e972bce100a1c993f to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241123a2be3c3c86b94da2bd039be84d7f45a5_c6d43d02a62db34e972bce100a1c993f 2024-11-23T17:10:00,509 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c 2024-11-23T17:10:00,511 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=168, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 
2024-11-23T17:10:00,513 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 2024-11-23T17:10:00,515 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 2024-11-23T17:10:00,516 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=168, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:10:00,516 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 2024-11-23T17:10:00,516 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1732381778950.c6d43d02a62db34e972bce100a1c993f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732381800516"}]},"ts":"9223372036854775807"} 2024-11-23T17:10:00,516 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1732381778950.19dbb7b062c887aed1dc9076d6db561a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732381800516"}]},"ts":"9223372036854775807"} 2024-11-23T17:10:00,518 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-23T17:10:00,518 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => c6d43d02a62db34e972bce100a1c993f, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1732381778950.c6d43d02a62db34e972bce100a1c993f.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 19dbb7b062c887aed1dc9076d6db561a, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1732381778950.19dbb7b062c887aed1dc9076d6db561a.', STARTKEY => '1', ENDKEY => ''}] 2024-11-23T17:10:00,518 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 
2024-11-23T17:10:00,518 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732381800518"}]},"ts":"9223372036854775807"} 2024-11-23T17:10:00,519 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-11-23T17:10:00,520 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=168, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:10:00,521 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=168, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 32 msec 2024-11-23T17:10:00,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-11-23T17:10:00,608 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:10:00,608 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-23T17:10:00,614 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-11-23T17:10:00,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T17:10:00,616 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-11-23T17:10:00,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T17:10:00,618 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" type: DISABLED 2024-11-23T17:10:00,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:10:00,643 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=817 (was 805) Potentially hanging thread: HFileArchiver-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 18294) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1293158403_22 at /127.0.0.1:34558 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-6415 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1293158403_22 at /127.0.0.1:51436 [Waiting for operation #6] 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1391265517_1 at /127.0.0.1:34514 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35121 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:35121 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:33289 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1293158403_22 at /127.0.0.1:48644 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=822 (was 802) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=574 (was 732), ProcessCount=14 (was 17), AvailableMemoryMB=2855 (was 2786) - AvailableMemoryMB LEAK? 
- 2024-11-23T17:10:00,644 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=817 is superior to 500 2024-11-23T17:10:00,660 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportExpiredSnapshot Thread=817, OpenFileDescriptor=822, MaxFileDescriptor=1048576, SystemLoadAverage=574, ProcessCount=14, AvailableMemoryMB=2855 2024-11-23T17:10:00,660 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=817 is superior to 500 2024-11-23T17:10:00,662 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T17:10:00,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=169, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-23T17:10:00,663 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T17:10:00,664 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportExpiredSnapshot" procId is: 169 2024-11-23T17:10:00,664 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T17:10:00,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-11-23T17:10:00,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742242_1418 (size=443) 2024-11-23T17:10:00,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742242_1418 (size=443) 2024-11-23T17:10:00,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742242_1418 (size=443) 2024-11-23T17:10:00,673 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => b01b4a0d5a04a2232b0b7ceae0e52b7b, NAME => 'testtb-testExportExpiredSnapshot,1,1732381800661.b01b4a0d5a04a2232b0b7ceae0e52b7b.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', 
BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:10:00,673 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => bae5f9b5ab6880e448eddfffe03e1e3e, NAME => 'testtb-testExportExpiredSnapshot,,1732381800661.bae5f9b5ab6880e448eddfffe03e1e3e.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:10:00,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742243_1419 (size=68) 2024-11-23T17:10:00,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742243_1419 (size=68) 2024-11-23T17:10:00,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742243_1419 (size=68) 2024-11-23T17:10:00,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742244_1420 (size=68) 2024-11-23T17:10:00,680 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1732381800661.bae5f9b5ab6880e448eddfffe03e1e3e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:10:00,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742244_1420 (size=68) 2024-11-23T17:10:00,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742244_1420 (size=68) 2024-11-23T17:10:00,680 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing bae5f9b5ab6880e448eddfffe03e1e3e, disabling compactions & flushes 2024-11-23T17:10:00,680 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1732381800661.bae5f9b5ab6880e448eddfffe03e1e3e. 2024-11-23T17:10:00,680 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1732381800661.bae5f9b5ab6880e448eddfffe03e1e3e. 2024-11-23T17:10:00,681 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1732381800661.bae5f9b5ab6880e448eddfffe03e1e3e. 
after waiting 0 ms 2024-11-23T17:10:00,681 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1732381800661.bae5f9b5ab6880e448eddfffe03e1e3e. 2024-11-23T17:10:00,681 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1732381800661.bae5f9b5ab6880e448eddfffe03e1e3e. 2024-11-23T17:10:00,681 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for bae5f9b5ab6880e448eddfffe03e1e3e: Waiting for close lock at 1732381800680Disabling compacts and flushes for region at 1732381800680Disabling writes for close at 1732381800681 (+1 ms)Writing region close event to WAL at 1732381800681Closed at 1732381800681 2024-11-23T17:10:00,681 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1732381800661.b01b4a0d5a04a2232b0b7ceae0e52b7b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:10:00,681 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing b01b4a0d5a04a2232b0b7ceae0e52b7b, disabling compactions & flushes 2024-11-23T17:10:00,681 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1732381800661.b01b4a0d5a04a2232b0b7ceae0e52b7b. 2024-11-23T17:10:00,681 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1732381800661.b01b4a0d5a04a2232b0b7ceae0e52b7b. 2024-11-23T17:10:00,681 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1732381800661.b01b4a0d5a04a2232b0b7ceae0e52b7b. after waiting 0 ms 2024-11-23T17:10:00,682 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1732381800661.b01b4a0d5a04a2232b0b7ceae0e52b7b. 2024-11-23T17:10:00,682 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1732381800661.b01b4a0d5a04a2232b0b7ceae0e52b7b. 
2024-11-23T17:10:00,682 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for b01b4a0d5a04a2232b0b7ceae0e52b7b: Waiting for close lock at 1732381800681Disabling compacts and flushes for region at 1732381800681Disabling writes for close at 1732381800681Writing region close event to WAL at 1732381800682 (+1 ms)Closed at 1732381800682 2024-11-23T17:10:00,682 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T17:10:00,683 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1732381800661.bae5f9b5ab6880e448eddfffe03e1e3e.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1732381800683"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732381800683"}]},"ts":"1732381800683"} 2024-11-23T17:10:00,683 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1732381800661.b01b4a0d5a04a2232b0b7ceae0e52b7b.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1732381800683"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732381800683"}]},"ts":"1732381800683"} 2024-11-23T17:10:00,685 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-23T17:10:00,685 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T17:10:00,686 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381800685"}]},"ts":"1732381800685"} 2024-11-23T17:10:00,687 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-11-23T17:10:00,687 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {1a7c5470b472=0} racks are {/default-rack=0} 2024-11-23T17:10:00,688 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T17:10:00,688 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T17:10:00,688 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T17:10:00,688 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T17:10:00,688 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T17:10:00,688 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T17:10:00,688 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T17:10:00,688 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T17:10:00,688 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T17:10:00,688 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T17:10:00,688 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bae5f9b5ab6880e448eddfffe03e1e3e, ASSIGN}, {pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b01b4a0d5a04a2232b0b7ceae0e52b7b, ASSIGN}] 2024-11-23T17:10:00,689 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b01b4a0d5a04a2232b0b7ceae0e52b7b, ASSIGN 2024-11-23T17:10:00,689 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bae5f9b5ab6880e448eddfffe03e1e3e, ASSIGN 2024-11-23T17:10:00,690 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bae5f9b5ab6880e448eddfffe03e1e3e, ASSIGN; state=OFFLINE, location=1a7c5470b472,37875,1732381534675; forceNewPlan=false, retain=false 2024-11-23T17:10:00,690 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b01b4a0d5a04a2232b0b7ceae0e52b7b, ASSIGN; state=OFFLINE, location=1a7c5470b472,43453,1732381534825; forceNewPlan=false, retain=false 2024-11-23T17:10:00,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-11-23T17:10:00,840 INFO [1a7c5470b472:46159 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-23T17:10:00,841 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=bae5f9b5ab6880e448eddfffe03e1e3e, regionState=OPENING, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:10:00,841 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=b01b4a0d5a04a2232b0b7ceae0e52b7b, regionState=OPENING, regionLocation=1a7c5470b472,43453,1732381534825 2024-11-23T17:10:00,842 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b01b4a0d5a04a2232b0b7ceae0e52b7b, ASSIGN because future has completed 2024-11-23T17:10:00,843 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE, hasLock=false; OpenRegionProcedure b01b4a0d5a04a2232b0b7ceae0e52b7b, server=1a7c5470b472,43453,1732381534825}] 2024-11-23T17:10:00,843 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bae5f9b5ab6880e448eddfffe03e1e3e, ASSIGN because future has completed 2024-11-23T17:10:00,844 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=173, ppid=170, state=RUNNABLE, hasLock=false; OpenRegionProcedure bae5f9b5ab6880e448eddfffe03e1e3e, server=1a7c5470b472,37875,1732381534675}] 2024-11-23T17:10:00,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-11-23T17:10:00,997 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,1,1732381800661.b01b4a0d5a04a2232b0b7ceae0e52b7b. 2024-11-23T17:10:00,997 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7752): Opening region: {ENCODED => b01b4a0d5a04a2232b0b7ceae0e52b7b, NAME => 'testtb-testExportExpiredSnapshot,1,1732381800661.b01b4a0d5a04a2232b0b7ceae0e52b7b.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T17:10:00,998 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1732381800661.b01b4a0d5a04a2232b0b7ceae0e52b7b. service=AccessControlService 2024-11-23T17:10:00,998 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T17:10:00,998 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot b01b4a0d5a04a2232b0b7ceae0e52b7b 2024-11-23T17:10:00,998 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1732381800661.b01b4a0d5a04a2232b0b7ceae0e52b7b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:10:00,998 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7794): checking encryption for b01b4a0d5a04a2232b0b7ceae0e52b7b 2024-11-23T17:10:00,998 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7797): checking classloading for b01b4a0d5a04a2232b0b7ceae0e52b7b 2024-11-23T17:10:00,998 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,,1732381800661.bae5f9b5ab6880e448eddfffe03e1e3e. 2024-11-23T17:10:00,999 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7752): Opening region: {ENCODED => bae5f9b5ab6880e448eddfffe03e1e3e, NAME => 'testtb-testExportExpiredSnapshot,,1732381800661.bae5f9b5ab6880e448eddfffe03e1e3e.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T17:10:00,999 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1732381800661.bae5f9b5ab6880e448eddfffe03e1e3e. service=AccessControlService 2024-11-23T17:10:00,999 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T17:10:00,999 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot bae5f9b5ab6880e448eddfffe03e1e3e 2024-11-23T17:10:00,999 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1732381800661.bae5f9b5ab6880e448eddfffe03e1e3e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:10:00,999 INFO [StoreOpener-b01b4a0d5a04a2232b0b7ceae0e52b7b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b01b4a0d5a04a2232b0b7ceae0e52b7b 2024-11-23T17:10:00,999 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7794): checking encryption for bae5f9b5ab6880e448eddfffe03e1e3e 2024-11-23T17:10:00,999 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7797): checking classloading for bae5f9b5ab6880e448eddfffe03e1e3e 2024-11-23T17:10:01,000 INFO [StoreOpener-bae5f9b5ab6880e448eddfffe03e1e3e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region bae5f9b5ab6880e448eddfffe03e1e3e 2024-11-23T17:10:01,000 INFO [StoreOpener-b01b4a0d5a04a2232b0b7ceae0e52b7b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b01b4a0d5a04a2232b0b7ceae0e52b7b columnFamilyName cf 2024-11-23T17:10:01,001 DEBUG [StoreOpener-b01b4a0d5a04a2232b0b7ceae0e52b7b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:10:01,001 INFO [StoreOpener-bae5f9b5ab6880e448eddfffe03e1e3e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
bae5f9b5ab6880e448eddfffe03e1e3e columnFamilyName cf 2024-11-23T17:10:01,002 INFO [StoreOpener-b01b4a0d5a04a2232b0b7ceae0e52b7b-1 {}] regionserver.HStore(327): Store=b01b4a0d5a04a2232b0b7ceae0e52b7b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:10:01,002 DEBUG [StoreOpener-bae5f9b5ab6880e448eddfffe03e1e3e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:10:01,002 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1038): replaying wal for b01b4a0d5a04a2232b0b7ceae0e52b7b 2024-11-23T17:10:01,002 INFO [StoreOpener-bae5f9b5ab6880e448eddfffe03e1e3e-1 {}] regionserver.HStore(327): Store=bae5f9b5ab6880e448eddfffe03e1e3e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:10:01,002 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/b01b4a0d5a04a2232b0b7ceae0e52b7b 2024-11-23T17:10:01,002 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1038): replaying wal for bae5f9b5ab6880e448eddfffe03e1e3e 2024-11-23T17:10:01,003 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/b01b4a0d5a04a2232b0b7ceae0e52b7b 2024-11-23T17:10:01,003 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/bae5f9b5ab6880e448eddfffe03e1e3e 2024-11-23T17:10:01,003 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1048): stopping wal replay for b01b4a0d5a04a2232b0b7ceae0e52b7b 2024-11-23T17:10:01,003 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1060): Cleaning up temporary data for b01b4a0d5a04a2232b0b7ceae0e52b7b 2024-11-23T17:10:01,003 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/bae5f9b5ab6880e448eddfffe03e1e3e 2024-11-23T17:10:01,004 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1048): stopping wal replay for bae5f9b5ab6880e448eddfffe03e1e3e 2024-11-23T17:10:01,004 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1060): Cleaning up temporary data for bae5f9b5ab6880e448eddfffe03e1e3e 2024-11-23T17:10:01,005 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 
{event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1093): writing seq id for b01b4a0d5a04a2232b0b7ceae0e52b7b 2024-11-23T17:10:01,005 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1093): writing seq id for bae5f9b5ab6880e448eddfffe03e1e3e 2024-11-23T17:10:01,006 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/b01b4a0d5a04a2232b0b7ceae0e52b7b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:10:01,006 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1114): Opened b01b4a0d5a04a2232b0b7ceae0e52b7b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65260465, jitterRate=-0.027543291449546814}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:10:01,006 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/bae5f9b5ab6880e448eddfffe03e1e3e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:10:01,006 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b01b4a0d5a04a2232b0b7ceae0e52b7b 2024-11-23T17:10:01,007 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1114): Opened bae5f9b5ab6880e448eddfffe03e1e3e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73252060, jitterRate=0.09154075384140015}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:10:01,007 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1122): Running coprocessor post-open hooks for bae5f9b5ab6880e448eddfffe03e1e3e 2024-11-23T17:10:01,007 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1006): Region open journal for bae5f9b5ab6880e448eddfffe03e1e3e: Running coprocessor pre-open hook at 1732381800999Writing region info on filesystem at 1732381800999Initializing all the Stores at 1732381801000 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381801000Cleaning up temporary data from old regions at 1732381801004 (+4 ms)Running coprocessor post-open hooks at 1732381801007 (+3 ms)Region opened successfully at 1732381801007 2024-11-23T17:10:01,007 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1006): Region open journal for b01b4a0d5a04a2232b0b7ceae0e52b7b: Running coprocessor pre-open hook at 1732381800998Writing region info on 
filesystem at 1732381800998Initializing all the Stores at 1732381800999 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381800999Cleaning up temporary data from old regions at 1732381801003 (+4 ms)Running coprocessor post-open hooks at 1732381801006 (+3 ms)Region opened successfully at 1732381801007 (+1 ms) 2024-11-23T17:10:01,008 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1732381800661.b01b4a0d5a04a2232b0b7ceae0e52b7b., pid=172, masterSystemTime=1732381800994 2024-11-23T17:10:01,008 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1732381800661.bae5f9b5ab6880e448eddfffe03e1e3e., pid=173, masterSystemTime=1732381800995 2024-11-23T17:10:01,009 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1732381800661.b01b4a0d5a04a2232b0b7ceae0e52b7b. 2024-11-23T17:10:01,009 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,1,1732381800661.b01b4a0d5a04a2232b0b7ceae0e52b7b. 2024-11-23T17:10:01,010 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=b01b4a0d5a04a2232b0b7ceae0e52b7b, regionState=OPEN, openSeqNum=2, regionLocation=1a7c5470b472,43453,1732381534825 2024-11-23T17:10:01,010 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1732381800661.bae5f9b5ab6880e448eddfffe03e1e3e. 2024-11-23T17:10:01,010 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,,1732381800661.bae5f9b5ab6880e448eddfffe03e1e3e. 
2024-11-23T17:10:01,011 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=bae5f9b5ab6880e448eddfffe03e1e3e, regionState=OPEN, openSeqNum=2, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:10:01,012 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=172, ppid=171, state=RUNNABLE, hasLock=false; OpenRegionProcedure b01b4a0d5a04a2232b0b7ceae0e52b7b, server=1a7c5470b472,43453,1732381534825 because future has completed 2024-11-23T17:10:01,014 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=173, ppid=170, state=RUNNABLE, hasLock=false; OpenRegionProcedure bae5f9b5ab6880e448eddfffe03e1e3e, server=1a7c5470b472,37875,1732381534675 because future has completed 2024-11-23T17:10:01,014 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=172, resume processing ppid=171 2024-11-23T17:10:01,014 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=172, ppid=171, state=SUCCESS, hasLock=false; OpenRegionProcedure b01b4a0d5a04a2232b0b7ceae0e52b7b, server=1a7c5470b472,43453,1732381534825 in 170 msec 2024-11-23T17:10:01,016 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=171, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b01b4a0d5a04a2232b0b7ceae0e52b7b, ASSIGN in 326 msec 2024-11-23T17:10:01,017 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=173, resume processing ppid=170 2024-11-23T17:10:01,017 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=173, ppid=170, state=SUCCESS, hasLock=false; OpenRegionProcedure bae5f9b5ab6880e448eddfffe03e1e3e, server=1a7c5470b472,37875,1732381534675 in 171 msec 2024-11-23T17:10:01,018 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=170, resume processing ppid=169 2024-11-23T17:10:01,018 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=170, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bae5f9b5ab6880e448eddfffe03e1e3e, ASSIGN in 329 msec 2024-11-23T17:10:01,019 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T17:10:01,019 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381801019"}]},"ts":"1732381801019"} 2024-11-23T17:10:01,020 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-11-23T17:10:01,021 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T17:10:01,021 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 2024-11-23T17:10:01,024 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37875 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], 
kv [jenkins: RWXCA] 2024-11-23T17:10:01,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:10:01,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:10:01,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:10:01,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:10:01,030 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:01,030 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:01,030 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:01,030 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:01,032 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=169, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 368 msec 2024-11-23T17:10:01,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-11-23T17:10:01,288 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-23T17:10:01,288 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-23T17:10:01,290 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-11-23T17:10:01,290 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1732381800661.bae5f9b5ab6880e448eddfffe03e1e3e. 
2024-11-23T17:10:01,291 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:10:01,292 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-23T17:10:01,296 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-23T17:10:01,300 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-23T17:10:01,303 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-23T17:10:01,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732381801303 (current time:1732381801303). 2024-11-23T17:10:01,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:10:01,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-23T17:10:01,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:10:01,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20a2d7a9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:01,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:10:01,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:10:01,304 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:10:01,304 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:10:01,304 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:10:01,304 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a99be22, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, 
fallbackAllowed=true, bind address=null 2024-11-23T17:10:01,305 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:10:01,305 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:10:01,305 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:01,306 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56236, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:10:01,306 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16b13f00, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:01,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:10:01,307 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:10:01,307 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:10:01,308 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51808, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:10:01,309 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159. 
2024-11-23T17:10:01,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:10:01,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:01,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:01,309 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:10:01,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a371fde, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:01,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:10:01,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:10:01,311 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:10:01,311 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:10:01,311 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:10:01,311 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@470d0d3c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:01,311 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:10:01,311 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:10:01,311 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:01,312 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56258, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:10:01,312 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f24822, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:01,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:10:01,314 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:10:01,314 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:10:01,315 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51822, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:10:01,316 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce., hostname=1a7c5470b472,37875,1732381534675, seqNum=2] 2024-11-23T17:10:01,317 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159. 
2024-11-23T17:10:01,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor224.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:10:01,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:01,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:01,317 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:10:01,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-23T17:10:01,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-23T17:10:01,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=174, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-23T17:10:01,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 174 2024-11-23T17:10:01,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-11-23T17:10:01,320 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:10:01,320 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:10:01,322 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:10:01,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742245_1421 (size=170) 2024-11-23T17:10:01,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742245_1421 (size=170) 2024-11-23T17:10:01,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742245_1421 (size=170) 2024-11-23T17:10:01,329 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:10:01,329 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bae5f9b5ab6880e448eddfffe03e1e3e}, {pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b01b4a0d5a04a2232b0b7ceae0e52b7b}] 2024-11-23T17:10:01,330 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bae5f9b5ab6880e448eddfffe03e1e3e 2024-11-23T17:10:01,330 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b01b4a0d5a04a2232b0b7ceae0e52b7b 2024-11-23T17:10:01,427 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-11-23T17:10:01,481 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43453 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=176 2024-11-23T17:10:01,481 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37875 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=175 2024-11-23T17:10:01,482 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1732381800661.bae5f9b5ab6880e448eddfffe03e1e3e. 2024-11-23T17:10:01,482 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1732381800661.b01b4a0d5a04a2232b0b7ceae0e52b7b. 2024-11-23T17:10:01,482 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.HRegion(2603): Flush status journal for bae5f9b5ab6880e448eddfffe03e1e3e: 2024-11-23T17:10:01,482 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.HRegion(2603): Flush status journal for b01b4a0d5a04a2232b0b7ceae0e52b7b: 2024-11-23T17:10:01,482 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1732381800661.bae5f9b5ab6880e448eddfffe03e1e3e. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-11-23T17:10:01,482 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1732381800661.b01b4a0d5a04a2232b0b7ceae0e52b7b. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-11-23T17:10:01,482 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1732381800661.bae5f9b5ab6880e448eddfffe03e1e3e.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-11-23T17:10:01,482 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1732381800661.b01b4a0d5a04a2232b0b7ceae0e52b7b.' 
region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-11-23T17:10:01,482 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:10:01,482 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:10:01,482 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:10:01,482 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:10:01,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742246_1422 (size=71) 2024-11-23T17:10:01,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742246_1422 (size=71) 2024-11-23T17:10:01,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742247_1423 (size=71) 2024-11-23T17:10:01,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742247_1423 (size=71) 2024-11-23T17:10:01,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742246_1422 (size=71) 2024-11-23T17:10:01,494 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1732381800661.b01b4a0d5a04a2232b0b7ceae0e52b7b. 2024-11-23T17:10:01,495 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-11-23T17:10:01,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742247_1423 (size=71) 2024-11-23T17:10:01,495 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1732381800661.bae5f9b5ab6880e448eddfffe03e1e3e. 
2024-11-23T17:10:01,495 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=175 2024-11-23T17:10:01,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=176 2024-11-23T17:10:01,495 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region b01b4a0d5a04a2232b0b7ceae0e52b7b 2024-11-23T17:10:01,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=175 2024-11-23T17:10:01,495 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region bae5f9b5ab6880e448eddfffe03e1e3e 2024-11-23T17:10:01,495 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b01b4a0d5a04a2232b0b7ceae0e52b7b 2024-11-23T17:10:01,495 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bae5f9b5ab6880e448eddfffe03e1e3e 2024-11-23T17:10:01,497 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=176, ppid=174, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b01b4a0d5a04a2232b0b7ceae0e52b7b in 167 msec 2024-11-23T17:10:01,498 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=175, resume processing ppid=174 2024-11-23T17:10:01,498 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=175, ppid=174, state=SUCCESS, hasLock=false; SnapshotRegionProcedure bae5f9b5ab6880e448eddfffe03e1e3e in 167 msec 2024-11-23T17:10:01,498 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:10:01,499 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:10:01,500 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-23T17:10:01,500 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-23T17:10:01,500 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:10:01,500 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-23T17:10:01,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742248_1424 (size=63) 2024-11-23T17:10:01,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742248_1424 (size=63) 2024-11-23T17:10:01,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742248_1424 (size=63) 2024-11-23T17:10:01,506 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:10:01,507 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-11-23T17:10:01,507 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-11-23T17:10:01,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742249_1425 (size=653) 2024-11-23T17:10:01,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742249_1425 (size=653) 2024-11-23T17:10:01,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742249_1425 (size=653) 2024-11-23T17:10:01,518 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:10:01,522 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:10:01,522 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-11-23T17:10:01,524 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=174, 
state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:10:01,524 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 174 2024-11-23T17:10:01,525 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=174, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 206 msec 2024-11-23T17:10:01,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-11-23T17:10:01,638 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-23T17:10:01,643 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37875 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,,1732381800661.bae5f9b5ab6880e448eddfffe03e1e3e. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:10:01,645 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43453 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,1,1732381800661.b01b4a0d5a04a2232b0b7ceae0e52b7b. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:10:01,645 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-23T17:10:01,648 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-11-23T17:10:01,648 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1732381800661.bae5f9b5ab6880e448eddfffe03e1e3e. 
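[Editor's note, not part of the captured log] The "Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed" entry above is the client-side acknowledgement that SnapshotProcedure pid=174 finished; the repeated "Checking to see if procedure is done" entries are the master answering the client's completion polls. As a point of reference only, a minimal sketch of the kind of client call that produces this exchange is shown below. It is an assumption-laden illustration: the configuration is a placeholder (it does not reproduce this test cluster's settings), and the class name TakeSnapshotExample is invented for the sketch.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeSnapshotExample {
  public static void main(String[] args) throws IOException {
    // Placeholder configuration: a real client would point the connection
    // registry / ZooKeeper quorum at the target cluster.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Synchronous snapshot request: this is the call that arrives at
      // MasterRpcServices.snapshot(...) and runs as a SnapshotProcedure on the
      // master. It returns once the procedure reports completion.
      admin.snapshot("emptySnaptb0-testExportExpiredSnapshot",
          TableName.valueOf("testtb-testExportExpiredSnapshot"));
    }
  }
}

The snapshot name and table name above are taken from the log; everything else in the sketch is illustrative.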
2024-11-23T17:10:01,648 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:10:01,649 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-23T17:10:01,653 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-23T17:10:01,658 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-23T17:10:01,660 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-23T17:10:01,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732381801660 (current time:1732381801660). 2024-11-23T17:10:01,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:10:01,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-23T17:10:01,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:10:01,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5fd7a7a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:01,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:10:01,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:10:01,661 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:10:01,661 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:10:01,662 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:10:01,662 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3933303c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-11-23T17:10:01,662 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:10:01,662 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:10:01,662 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:01,663 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56276, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:10:01,663 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d14401b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:01,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:10:01,664 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:10:01,664 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:10:01,665 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51828, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:10:01,666 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159. 
2024-11-23T17:10:01,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:10:01,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:01,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:01,666 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:10:01,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78681f00, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:01,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:10:01,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:10:01,668 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:10:01,668 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:10:01,668 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:10:01,668 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f2b569b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:01,668 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:10:01,668 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:10:01,668 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:01,669 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56298, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:10:01,670 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@643585a0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:01,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:10:01,671 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:10:01,671 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:10:01,672 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51838, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:10:01,673 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce., hostname=1a7c5470b472,37875,1732381534675, seqNum=2] 2024-11-23T17:10:01,674 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159. 
2024-11-23T17:10:01,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor224.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:10:01,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:01,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:01,674 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:10:01,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-23T17:10:01,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-23T17:10:01,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-23T17:10:01,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-11-23T17:10:01,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-11-23T17:10:01,677 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:10:01,678 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:10:01,680 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:10:01,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742250_1426 (size=165) 2024-11-23T17:10:01,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742250_1426 (size=165) 2024-11-23T17:10:01,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742250_1426 (size=165) 2024-11-23T17:10:01,686 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:10:01,687 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bae5f9b5ab6880e448eddfffe03e1e3e}, {pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b01b4a0d5a04a2232b0b7ceae0e52b7b}] 2024-11-23T17:10:01,687 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bae5f9b5ab6880e448eddfffe03e1e3e 2024-11-23T17:10:01,687 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b01b4a0d5a04a2232b0b7ceae0e52b7b 2024-11-23T17:10:01,787 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-11-23T17:10:01,839 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37875 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=178 2024-11-23T17:10:01,839 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43453 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=179 2024-11-23T17:10:01,839 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1732381800661.bae5f9b5ab6880e448eddfffe03e1e3e. 2024-11-23T17:10:01,839 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1732381800661.b01b4a0d5a04a2232b0b7ceae0e52b7b. 2024-11-23T17:10:01,839 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(2902): Flushing bae5f9b5ab6880e448eddfffe03e1e3e 1/1 column families, dataSize=400 B heapSize=1.09 KB 2024-11-23T17:10:01,839 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(2902): Flushing b01b4a0d5a04a2232b0b7ceae0e52b7b 1/1 column families, dataSize=2.87 KB heapSize=6.44 KB 2024-11-23T17:10:01,857 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411238ed8ed776cea487ca09a8675b75bf7aa_bae5f9b5ab6880e448eddfffe03e1e3e is 71, key is 02a9c86c7682b4aa9670d056c1ee8a2b/cf:q/1732381801643/Put/seqid=0 2024-11-23T17:10:01,858 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024112314e20be28eb74beb8691eec68d1e1a4d_b01b4a0d5a04a2232b0b7ceae0e52b7b is 71, key is 1f5c65edd30c618a816117a96dcb89db/cf:q/1732381801644/Put/seqid=0 2024-11-23T17:10:01,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742251_1427 (size=5311) 2024-11-23T17:10:01,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742251_1427 (size=5311) 2024-11-23T17:10:01,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742252_1428 (size=7962) 2024-11-23T17:10:01,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742252_1428 (size=7962) 2024-11-23T17:10:01,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742251_1427 (size=5311) 2024-11-23T17:10:01,864 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742252_1428 (size=7962) 2024-11-23T17:10:01,865 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:10:01,865 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:10:01,868 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024112314e20be28eb74beb8691eec68d1e1a4d_b01b4a0d5a04a2232b0b7ceae0e52b7b to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b2024112314e20be28eb74beb8691eec68d1e1a4d_b01b4a0d5a04a2232b0b7ceae0e52b7b 2024-11-23T17:10:01,869 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411238ed8ed776cea487ca09a8675b75bf7aa_bae5f9b5ab6880e448eddfffe03e1e3e to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e202411238ed8ed776cea487ca09a8675b75bf7aa_bae5f9b5ab6880e448eddfffe03e1e3e 2024-11-23T17:10:01,869 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/b01b4a0d5a04a2232b0b7ceae0e52b7b/.tmp/cf/3ce7d56a76f744b78497b2e8eafd9843, store: [table=testtb-testExportExpiredSnapshot family=cf region=b01b4a0d5a04a2232b0b7ceae0e52b7b] 2024-11-23T17:10:01,869 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/bae5f9b5ab6880e448eddfffe03e1e3e/.tmp/cf/1250447bceda4404be2413b8c6ae5c3c, store: [table=testtb-testExportExpiredSnapshot family=cf region=bae5f9b5ab6880e448eddfffe03e1e3e] 2024-11-23T17:10:01,870 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/b01b4a0d5a04a2232b0b7ceae0e52b7b/.tmp/cf/3ce7d56a76f744b78497b2e8eafd9843 is 209, key is 164eca6e5a7ed36e1fea8babf5def125e/cf:q/1732381801644/Put/seqid=0 2024-11-23T17:10:01,870 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 
{event_type=RS_SNAPSHOT_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/bae5f9b5ab6880e448eddfffe03e1e3e/.tmp/cf/1250447bceda4404be2413b8c6ae5c3c is 209, key is 03e84d1db7d11cc003b2f6eac7922f64d/cf:q/1732381801643/Put/seqid=0 2024-11-23T17:10:01,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742253_1429 (size=14384) 2024-11-23T17:10:01,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742254_1430 (size=6531) 2024-11-23T17:10:01,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742253_1429 (size=14384) 2024-11-23T17:10:01,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742254_1430 (size=6531) 2024-11-23T17:10:01,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742254_1430 (size=6531) 2024-11-23T17:10:01,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742253_1429 (size=14384) 2024-11-23T17:10:01,875 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/b01b4a0d5a04a2232b0b7ceae0e52b7b/.tmp/cf/3ce7d56a76f744b78497b2e8eafd9843 2024-11-23T17:10:01,875 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=400, hasBloomFilter=true, into tmp file hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/bae5f9b5ab6880e448eddfffe03e1e3e/.tmp/cf/1250447bceda4404be2413b8c6ae5c3c 2024-11-23T17:10:01,879 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/b01b4a0d5a04a2232b0b7ceae0e52b7b/.tmp/cf/3ce7d56a76f744b78497b2e8eafd9843 as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/b01b4a0d5a04a2232b0b7ceae0e52b7b/cf/3ce7d56a76f744b78497b2e8eafd9843 2024-11-23T17:10:01,879 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/bae5f9b5ab6880e448eddfffe03e1e3e/.tmp/cf/1250447bceda4404be2413b8c6ae5c3c as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/bae5f9b5ab6880e448eddfffe03e1e3e/cf/1250447bceda4404be2413b8c6ae5c3c 2024-11-23T17:10:01,883 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/b01b4a0d5a04a2232b0b7ceae0e52b7b/cf/3ce7d56a76f744b78497b2e8eafd9843, entries=44, sequenceid=6, filesize=14.0 K 2024-11-23T17:10:01,884 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/bae5f9b5ab6880e448eddfffe03e1e3e/cf/1250447bceda4404be2413b8c6ae5c3c, entries=6, sequenceid=6, filesize=6.4 K 2024-11-23T17:10:01,884 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(3140): Finished flush of dataSize ~2.87 KB/2936, heapSize ~6.42 KB/6576, currentSize=0 B/0 for b01b4a0d5a04a2232b0b7ceae0e52b7b in 45ms, sequenceid=6, compaction requested=false 2024-11-23T17:10:01,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-11-23T17:10:01,885 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(3140): Finished flush of dataSize ~400 B/400, heapSize ~1.08 KB/1104, currentSize=0 B/0 for bae5f9b5ab6880e448eddfffe03e1e3e in 46ms, sequenceid=6, compaction requested=false 2024-11-23T17:10:01,885 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(2603): Flush status journal for b01b4a0d5a04a2232b0b7ceae0e52b7b: 2024-11-23T17:10:01,885 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1732381800661.b01b4a0d5a04a2232b0b7ceae0e52b7b. for snaptb0-testExportExpiredSnapshot completed. 2024-11-23T17:10:01,885 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(2603): Flush status journal for bae5f9b5ab6880e448eddfffe03e1e3e: 2024-11-23T17:10:01,885 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1732381800661.bae5f9b5ab6880e448eddfffe03e1e3e. for snaptb0-testExportExpiredSnapshot completed. 2024-11-23T17:10:01,885 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1732381800661.b01b4a0d5a04a2232b0b7ceae0e52b7b.' 
region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-23T17:10:01,885 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:10:01,885 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/b01b4a0d5a04a2232b0b7ceae0e52b7b/cf/3ce7d56a76f744b78497b2e8eafd9843] hfiles 2024-11-23T17:10:01,885 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/b01b4a0d5a04a2232b0b7ceae0e52b7b/cf/3ce7d56a76f744b78497b2e8eafd9843 for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-23T17:10:01,885 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1732381800661.bae5f9b5ab6880e448eddfffe03e1e3e.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-23T17:10:01,885 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:10:01,885 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/bae5f9b5ab6880e448eddfffe03e1e3e/cf/1250447bceda4404be2413b8c6ae5c3c] hfiles 2024-11-23T17:10:01,885 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/bae5f9b5ab6880e448eddfffe03e1e3e/cf/1250447bceda4404be2413b8c6ae5c3c for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-23T17:10:01,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742255_1431 (size=110) 2024-11-23T17:10:01,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742255_1431 (size=110) 2024-11-23T17:10:01,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742255_1431 (size=110) 2024-11-23T17:10:01,894 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1732381800661.b01b4a0d5a04a2232b0b7ceae0e52b7b. 
2024-11-23T17:10:01,894 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=179 2024-11-23T17:10:01,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=179 2024-11-23T17:10:01,895 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region b01b4a0d5a04a2232b0b7ceae0e52b7b 2024-11-23T17:10:01,895 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b01b4a0d5a04a2232b0b7ceae0e52b7b 2024-11-23T17:10:01,897 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=179, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b01b4a0d5a04a2232b0b7ceae0e52b7b in 209 msec 2024-11-23T17:10:01,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742256_1432 (size=110) 2024-11-23T17:10:01,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742256_1432 (size=110) 2024-11-23T17:10:01,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742256_1432 (size=110) 2024-11-23T17:10:01,898 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1732381800661.bae5f9b5ab6880e448eddfffe03e1e3e. 
2024-11-23T17:10:01,898 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-11-23T17:10:01,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=178 2024-11-23T17:10:01,899 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region bae5f9b5ab6880e448eddfffe03e1e3e 2024-11-23T17:10:01,899 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bae5f9b5ab6880e448eddfffe03e1e3e 2024-11-23T17:10:01,901 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=178, resume processing ppid=177 2024-11-23T17:10:01,901 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=178, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure bae5f9b5ab6880e448eddfffe03e1e3e in 213 msec 2024-11-23T17:10:01,901 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:10:01,902 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:10:01,903 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-23T17:10:01,903 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-23T17:10:01,903 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:10:01,904 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b2024112314e20be28eb74beb8691eec68d1e1a4d_b01b4a0d5a04a2232b0b7ceae0e52b7b, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e202411238ed8ed776cea487ca09a8675b75bf7aa_bae5f9b5ab6880e448eddfffe03e1e3e] hfiles 2024-11-23T17:10:01,904 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b2024112314e20be28eb74beb8691eec68d1e1a4d_b01b4a0d5a04a2232b0b7ceae0e52b7b 2024-11-23T17:10:01,904 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e202411238ed8ed776cea487ca09a8675b75bf7aa_bae5f9b5ab6880e448eddfffe03e1e3e 2024-11-23T17:10:01,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742257_1433 (size=294) 2024-11-23T17:10:01,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742257_1433 (size=294) 2024-11-23T17:10:01,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742257_1433 (size=294) 2024-11-23T17:10:01,910 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:10:01,910 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-11-23T17:10:01,910 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-11-23T17:10:01,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742258_1434 (size=963) 2024-11-23T17:10:01,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742258_1434 (size=963) 2024-11-23T17:10:01,920 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742258_1434 (size=963) 2024-11-23T17:10:01,922 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:10:01,927 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:10:01,927 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-11-23T17:10:01,928 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:10:01,928 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-11-23T17:10:01,930 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=177, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 253 msec 2024-11-23T17:10:01,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-11-23T17:10:01,998 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-23T17:10:01,999 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T17:10:02,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot 2024-11-23T17:10:02,001 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, 
hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T17:10:02,001 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 180 2024-11-23T17:10:02,002 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T17:10:02,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-23T17:10:02,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742259_1435 (size=436) 2024-11-23T17:10:02,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742259_1435 (size=436) 2024-11-23T17:10:02,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742259_1435 (size=436) 2024-11-23T17:10:02,009 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d701f2f82212e8f3b780ec9dae20e678, NAME => 'testExportExpiredSnapshot,,1732381801999.d701f2f82212e8f3b780ec9dae20e678.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:10:02,010 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 5c659a536d4a93accf1b401f39f6f967, NAME => 'testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:10:02,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742260_1436 (size=61) 2024-11-23T17:10:02,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742261_1437 (size=61) 2024-11-23T17:10:02,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:38951 is added to blk_1073742261_1437 (size=61) 2024-11-23T17:10:02,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742260_1436 (size=61) 2024-11-23T17:10:02,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742261_1437 (size=61) 2024-11-23T17:10:02,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742260_1436 (size=61) 2024-11-23T17:10:02,017 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1732381801999.d701f2f82212e8f3b780ec9dae20e678.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:10:02,017 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing d701f2f82212e8f3b780ec9dae20e678, disabling compactions & flushes 2024-11-23T17:10:02,017 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1732381801999.d701f2f82212e8f3b780ec9dae20e678. 2024-11-23T17:10:02,017 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1732381801999.d701f2f82212e8f3b780ec9dae20e678. 2024-11-23T17:10:02,017 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1732381801999.d701f2f82212e8f3b780ec9dae20e678. after waiting 0 ms 2024-11-23T17:10:02,017 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1732381801999.d701f2f82212e8f3b780ec9dae20e678. 2024-11-23T17:10:02,017 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1732381801999.d701f2f82212e8f3b780ec9dae20e678. 2024-11-23T17:10:02,017 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for d701f2f82212e8f3b780ec9dae20e678: Waiting for close lock at 1732381802017Disabling compacts and flushes for region at 1732381802017Disabling writes for close at 1732381802017Writing region close event to WAL at 1732381802017Closed at 1732381802017 2024-11-23T17:10:02,018 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:10:02,018 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing 5c659a536d4a93accf1b401f39f6f967, disabling compactions & flushes 2024-11-23T17:10:02,018 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967. 2024-11-23T17:10:02,018 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967. 
2024-11-23T17:10:02,018 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967. after waiting 0 ms 2024-11-23T17:10:02,018 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967. 2024-11-23T17:10:02,018 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967. 2024-11-23T17:10:02,018 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for 5c659a536d4a93accf1b401f39f6f967: Waiting for close lock at 1732381802018Disabling compacts and flushes for region at 1732381802018Disabling writes for close at 1732381802018Writing region close event to WAL at 1732381802018Closed at 1732381802018 2024-11-23T17:10:02,019 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T17:10:02,019 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1732381801999.d701f2f82212e8f3b780ec9dae20e678.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1732381802019"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732381802019"}]},"ts":"1732381802019"} 2024-11-23T17:10:02,019 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1732381802019"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732381802019"}]},"ts":"1732381802019"} 2024-11-23T17:10:02,022 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
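The table descriptor logged for 'testExportExpiredSnapshot' (a single 'cf' family with IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1', split at key '1' to produce the two regions just added to meta) can be expressed with the client-side descriptor builders. A rough, illustrative equivalent rather than the test's own code:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    class CreateMobTable {
      // Assumes an Admin handle obtained as in the earlier sketch.
      static void run(Admin admin) throws java.io.IOException {
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)   // IS_MOB => 'true'
            .setMobThreshold(0L)   // MOB_THRESHOLD => '0': every value is written as a MOB file
            .setMaxVersions(1)     // VERSIONS => '1'
            .build();
        TableDescriptor table = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testExportExpiredSnapshot"))
            .setColumnFamily(cf)
            .build();
        // One split key ('1') yields the two regions created above
        // (start/end keys ''..'1' and '1'..'').
        admin.createTable(table, new byte[][] { Bytes.toBytes("1") });
      }
    }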
2024-11-23T17:10:02,022 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T17:10:02,022 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381802022"}]},"ts":"1732381802022"} 2024-11-23T17:10:02,024 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-11-23T17:10:02,024 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {1a7c5470b472=0} racks are {/default-rack=0} 2024-11-23T17:10:02,025 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T17:10:02,025 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T17:10:02,025 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T17:10:02,025 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T17:10:02,025 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T17:10:02,025 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T17:10:02,025 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T17:10:02,025 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T17:10:02,025 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T17:10:02,025 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T17:10:02,025 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=d701f2f82212e8f3b780ec9dae20e678, ASSIGN}, {pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=5c659a536d4a93accf1b401f39f6f967, ASSIGN}] 2024-11-23T17:10:02,026 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=5c659a536d4a93accf1b401f39f6f967, ASSIGN 2024-11-23T17:10:02,026 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=d701f2f82212e8f3b780ec9dae20e678, ASSIGN 2024-11-23T17:10:02,027 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=5c659a536d4a93accf1b401f39f6f967, ASSIGN; state=OFFLINE, location=1a7c5470b472,37875,1732381534675; forceNewPlan=false, retain=false 2024-11-23T17:10:02,027 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=d701f2f82212e8f3b780ec9dae20e678, ASSIGN; state=OFFLINE, location=1a7c5470b472,44711,1732381534872; forceNewPlan=false, retain=false 2024-11-23T17:10:02,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-23T17:10:02,177 INFO [1a7c5470b472:46159 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-23T17:10:02,178 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=181 updating hbase:meta row=d701f2f82212e8f3b780ec9dae20e678, regionState=OPENING, regionLocation=1a7c5470b472,44711,1732381534872 2024-11-23T17:10:02,178 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=182 updating hbase:meta row=5c659a536d4a93accf1b401f39f6f967, regionState=OPENING, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:10:02,179 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=5c659a536d4a93accf1b401f39f6f967, ASSIGN because future has completed 2024-11-23T17:10:02,180 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=183, ppid=182, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5c659a536d4a93accf1b401f39f6f967, server=1a7c5470b472,37875,1732381534675}] 2024-11-23T17:10:02,180 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=d701f2f82212e8f3b780ec9dae20e678, ASSIGN because future has completed 2024-11-23T17:10:02,180 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=184, ppid=181, state=RUNNABLE, hasLock=false; OpenRegionProcedure d701f2f82212e8f3b780ec9dae20e678, server=1a7c5470b472,44711,1732381534872}] 2024-11-23T17:10:02,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-23T17:10:02,335 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967. 2024-11-23T17:10:02,335 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7752): Opening region: {ENCODED => 5c659a536d4a93accf1b401f39f6f967, NAME => 'testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T17:10:02,335 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967. service=AccessControlService 2024-11-23T17:10:02,335 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,,1732381801999.d701f2f82212e8f3b780ec9dae20e678. 
2024-11-23T17:10:02,335 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7752): Opening region: {ENCODED => d701f2f82212e8f3b780ec9dae20e678, NAME => 'testExportExpiredSnapshot,,1732381801999.d701f2f82212e8f3b780ec9dae20e678.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T17:10:02,335 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:10:02,335 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 5c659a536d4a93accf1b401f39f6f967 2024-11-23T17:10:02,335 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:10:02,335 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,,1732381801999.d701f2f82212e8f3b780ec9dae20e678. service=AccessControlService 2024-11-23T17:10:02,335 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7794): checking encryption for 5c659a536d4a93accf1b401f39f6f967 2024-11-23T17:10:02,335 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7797): checking classloading for 5c659a536d4a93accf1b401f39f6f967 2024-11-23T17:10:02,335 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
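Each opened region registers the AccessControlService coprocessor, so the table ACLs stored in hbase:acl and pushed around via ZooKeeper (as seen further down in this log) apply to it. An illustrative way to read those ACLs from a client, not something the test itself does at this point:

    import java.util.List;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    class ShowTablePermissions {
      static void run(Connection conn) throws Throwable {
        // Reads the ACL entries for the table; once the CreateTableProcedure
        // below writes the owner permission, this includes "jenkins: RWXCA".
        List<UserPermission> perms =
            AccessControlClient.getUserPermissions(conn, "testExportExpiredSnapshot");
        for (UserPermission perm : perms) {
          System.out.println(perm);
        }
      }
    }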
2024-11-23T17:10:02,336 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot d701f2f82212e8f3b780ec9dae20e678 2024-11-23T17:10:02,336 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1732381801999.d701f2f82212e8f3b780ec9dae20e678.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:10:02,336 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7794): checking encryption for d701f2f82212e8f3b780ec9dae20e678 2024-11-23T17:10:02,336 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7797): checking classloading for d701f2f82212e8f3b780ec9dae20e678 2024-11-23T17:10:02,337 INFO [StoreOpener-5c659a536d4a93accf1b401f39f6f967-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5c659a536d4a93accf1b401f39f6f967 2024-11-23T17:10:02,337 INFO [StoreOpener-d701f2f82212e8f3b780ec9dae20e678-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d701f2f82212e8f3b780ec9dae20e678 2024-11-23T17:10:02,338 INFO [StoreOpener-d701f2f82212e8f3b780ec9dae20e678-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d701f2f82212e8f3b780ec9dae20e678 columnFamilyName cf 2024-11-23T17:10:02,338 INFO [StoreOpener-5c659a536d4a93accf1b401f39f6f967-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5c659a536d4a93accf1b401f39f6f967 columnFamilyName cf 2024-11-23T17:10:02,339 DEBUG [StoreOpener-d701f2f82212e8f3b780ec9dae20e678-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:10:02,339 DEBUG [StoreOpener-5c659a536d4a93accf1b401f39f6f967-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:10:02,339 INFO [StoreOpener-5c659a536d4a93accf1b401f39f6f967-1 {}] regionserver.HStore(327): Store=5c659a536d4a93accf1b401f39f6f967/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:10:02,339 INFO [StoreOpener-d701f2f82212e8f3b780ec9dae20e678-1 {}] regionserver.HStore(327): Store=d701f2f82212e8f3b780ec9dae20e678/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:10:02,339 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1038): replaying wal for 5c659a536d4a93accf1b401f39f6f967 2024-11-23T17:10:02,339 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1038): replaying wal for d701f2f82212e8f3b780ec9dae20e678 2024-11-23T17:10:02,340 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportExpiredSnapshot/5c659a536d4a93accf1b401f39f6f967 2024-11-23T17:10:02,340 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportExpiredSnapshot/d701f2f82212e8f3b780ec9dae20e678 2024-11-23T17:10:02,340 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportExpiredSnapshot/5c659a536d4a93accf1b401f39f6f967 2024-11-23T17:10:02,340 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportExpiredSnapshot/d701f2f82212e8f3b780ec9dae20e678 2024-11-23T17:10:02,341 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1048): stopping wal replay for 5c659a536d4a93accf1b401f39f6f967 2024-11-23T17:10:02,341 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1048): stopping wal replay for d701f2f82212e8f3b780ec9dae20e678 2024-11-23T17:10:02,341 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1060): Cleaning up temporary data for 5c659a536d4a93accf1b401f39f6f967 2024-11-23T17:10:02,341 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1060): Cleaning up temporary data for d701f2f82212e8f3b780ec9dae20e678 2024-11-23T17:10:02,342 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 
{event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1093): writing seq id for d701f2f82212e8f3b780ec9dae20e678 2024-11-23T17:10:02,342 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1093): writing seq id for 5c659a536d4a93accf1b401f39f6f967 2024-11-23T17:10:02,343 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportExpiredSnapshot/d701f2f82212e8f3b780ec9dae20e678/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:10:02,344 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportExpiredSnapshot/5c659a536d4a93accf1b401f39f6f967/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:10:02,344 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1114): Opened d701f2f82212e8f3b780ec9dae20e678; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65102051, jitterRate=-0.029903843998908997}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:10:02,344 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d701f2f82212e8f3b780ec9dae20e678 2024-11-23T17:10:02,344 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1114): Opened 5c659a536d4a93accf1b401f39f6f967; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74379571, jitterRate=0.10834197700023651}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:10:02,344 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5c659a536d4a93accf1b401f39f6f967 2024-11-23T17:10:02,345 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1006): Region open journal for d701f2f82212e8f3b780ec9dae20e678: Running coprocessor pre-open hook at 1732381802336Writing region info on filesystem at 1732381802336Initializing all the Stores at 1732381802336Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381802336Cleaning up temporary data from old regions at 1732381802341 (+5 ms)Running coprocessor post-open hooks at 1732381802344 (+3 ms)Region opened successfully at 1732381802344 2024-11-23T17:10:02,345 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1006): Region open journal for 5c659a536d4a93accf1b401f39f6f967: Running coprocessor pre-open hook at 1732381802335Writing region info on filesystem at 
1732381802336 (+1 ms)Initializing all the Stores at 1732381802336Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381802336Cleaning up temporary data from old regions at 1732381802341 (+5 ms)Running coprocessor post-open hooks at 1732381802344 (+3 ms)Region opened successfully at 1732381802344 2024-11-23T17:10:02,345 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967., pid=183, masterSystemTime=1732381802331 2024-11-23T17:10:02,345 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,,1732381801999.d701f2f82212e8f3b780ec9dae20e678., pid=184, masterSystemTime=1732381802332 2024-11-23T17:10:02,347 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,,1732381801999.d701f2f82212e8f3b780ec9dae20e678. 2024-11-23T17:10:02,347 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,,1732381801999.d701f2f82212e8f3b780ec9dae20e678. 2024-11-23T17:10:02,347 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=181 updating hbase:meta row=d701f2f82212e8f3b780ec9dae20e678, regionState=OPEN, openSeqNum=2, regionLocation=1a7c5470b472,44711,1732381534872 2024-11-23T17:10:02,348 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967. 2024-11-23T17:10:02,348 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967. 
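With both regions reporting "Opened ..." above, the table is locatable from any client. A small illustrative check (not part of the test) that lists each region and the server hosting it:

    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    class ShowRegionLocations {
      static void run(Connection conn) throws java.io.IOException {
        try (RegionLocator locator =
            conn.getRegionLocator(TableName.valueOf("testExportExpiredSnapshot"))) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            // For this log the expected result is two regions, one on the
            // region server listening on port 44711 and one on port 37875.
            System.out.println(loc.getRegion().getRegionNameAsString()
                + " -> " + loc.getServerName());
          }
        }
      }
    }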
2024-11-23T17:10:02,348 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=182 updating hbase:meta row=5c659a536d4a93accf1b401f39f6f967, regionState=OPEN, openSeqNum=2, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:10:02,349 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=184, ppid=181, state=RUNNABLE, hasLock=false; OpenRegionProcedure d701f2f82212e8f3b780ec9dae20e678, server=1a7c5470b472,44711,1732381534872 because future has completed 2024-11-23T17:10:02,349 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=183, ppid=182, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5c659a536d4a93accf1b401f39f6f967, server=1a7c5470b472,37875,1732381534675 because future has completed 2024-11-23T17:10:02,351 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=184, resume processing ppid=181 2024-11-23T17:10:02,351 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=184, ppid=181, state=SUCCESS, hasLock=false; OpenRegionProcedure d701f2f82212e8f3b780ec9dae20e678, server=1a7c5470b472,44711,1732381534872 in 169 msec 2024-11-23T17:10:02,352 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=183, resume processing ppid=182 2024-11-23T17:10:02,352 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=183, ppid=182, state=SUCCESS, hasLock=false; OpenRegionProcedure 5c659a536d4a93accf1b401f39f6f967, server=1a7c5470b472,37875,1732381534675 in 170 msec 2024-11-23T17:10:02,352 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=181, ppid=180, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=d701f2f82212e8f3b780ec9dae20e678, ASSIGN in 326 msec 2024-11-23T17:10:02,354 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=182, resume processing ppid=180 2024-11-23T17:10:02,354 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=182, ppid=180, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=5c659a536d4a93accf1b401f39f6f967, ASSIGN in 327 msec 2024-11-23T17:10:02,354 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T17:10:02,355 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381802354"}]},"ts":"1732381802354"} 2024-11-23T17:10:02,356 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-11-23T17:10:02,357 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T17:10:02,357 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-11-23T17:10:02,360 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37875 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-23T17:10:02,362 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:10:02,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:10:02,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:10:02,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:10:02,371 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:02,371 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:02,371 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:02,372 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:02,372 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:02,372 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:02,372 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:02,372 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:02,373 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=180, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot in 372 msec 2024-11-23T17:10:02,628 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-23T17:10:02,628 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportExpiredSnapshot completed 2024-11-23T17:10:02,628 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-23T17:10:02,630 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-11-23T17:10:02,630 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1732381801999.d701f2f82212e8f3b780ec9dae20e678. 2024-11-23T17:10:02,630 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:10:02,631 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-23T17:10:02,636 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-23T17:10:02,641 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-23T17:10:02,647 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44711 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,,1732381801999.d701f2f82212e8f3b780ec9dae20e678. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:10:02,649 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37875 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:10:02,650 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-23T17:10:02,652 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-11-23T17:10:02,652 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1732381801999.d701f2f82212e8f3b780ec9dae20e678. 
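The "writing data to region ... with WAL disabled" warnings above come from mutations that skip the write-ahead log, which is how the test loads rows quickly before snapshotting. A hedged sketch of such a write via Put#setDurability (row, qualifier and value are placeholders, not the test's data):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    class SkipWalPut {
      static void run(Connection conn) throws java.io.IOException {
        try (Table table = conn.getTable(TableName.valueOf("testExportExpiredSnapshot"))) {
          Put put = new Put(Bytes.toBytes("row-0"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          // Skipping the WAL is what triggers the region server's
          // "Data may be lost in the event of a crash" warning.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }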
2024-11-23T17:10:02,652 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:10:02,654 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-23T17:10:02,658 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-23T17:10:02,663 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-11-23T17:10:02,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-23T17:10:02,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:10:02,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@348b8fc9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:02,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:10:02,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:10:02,664 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:10:02,664 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:10:02,664 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:10:02,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5eef9fac, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:02,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:10:02,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:10:02,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:02,666 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56318, version=3.0.0-beta-2-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:10:02,666 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11822cec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:02,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:10:02,667 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:10:02,667 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:10:02,668 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51844, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:10:02,669 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159. 2024-11-23T17:10:02,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:10:02,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:02,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:02,669 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T17:10:02,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12fbc9da, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:02,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:10:02,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:10:02,670 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:10:02,670 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:10:02,670 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:10:02,670 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7833985, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:02,670 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:10:02,671 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:10:02,671 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:02,671 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56344, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:10:02,672 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e19089d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:02,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:10:02,673 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:10:02,673 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:10:02,673 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51848, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-23T17:10:02,674 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce., hostname=1a7c5470b472,37875,1732381534675, seqNum=2] 2024-11-23T17:10:02,675 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159. 2024-11-23T17:10:02,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor224.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:10:02,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:02,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:02,676 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
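The snapshot request logged above carries ttl=10, i.e. snapshot-testExportExpiredSnapshot is created with a 10-second time-to-live on table testExportExpiredSnapshot. For reference, a client typically requests such a TTL-bound FLUSH snapshot through the Admin API roughly as sketched below; the SnapshotDescription constructor that takes a properties map and the "TTL" property key are assumptions based on recent HBase releases and may differ by version.

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TtlSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Snapshot properties; "TTL" (in seconds) is assumed to be the key that
          // produces the ttl=10 shown in the MasterRpcServices entry above.
          Map<String, Object> props = new HashMap<>();
          props.put("TTL", 10L);
          // FLUSH-type snapshot of the online table, matching type=FLUSH in the log.
          admin.snapshot(new SnapshotDescription("snapshot-testExportExpiredSnapshot",
              TableName.valueOf("testExportExpiredSnapshot"), SnapshotType.FLUSH, props));
        }
      }
    }

The master-side entries that follow (register snapshot, SnapshotProcedure pid=185) are the server's handling of exactly this kind of request.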
2024-11-23T17:10:02,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-23T17:10:02,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T17:10:02,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-11-23T17:10:02,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 185 2024-11-23T17:10:02,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-11-23T17:10:02,678 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:10:02,679 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:10:02,681 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:10:02,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742262_1438 (size=152) 2024-11-23T17:10:02,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742262_1438 (size=152) 2024-11-23T17:10:02,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742262_1438 (size=152) 2024-11-23T17:10:02,687 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:10:02,688 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d701f2f82212e8f3b780ec9dae20e678}, {pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5c659a536d4a93accf1b401f39f6f967}] 2024-11-23T17:10:02,688 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=186, ppid=185, state=RUNNABLE, 
hasLock=false; SnapshotRegionProcedure d701f2f82212e8f3b780ec9dae20e678 2024-11-23T17:10:02,688 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5c659a536d4a93accf1b401f39f6f967 2024-11-23T17:10:02,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-11-23T17:10:02,840 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37875 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=187 2024-11-23T17:10:02,840 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44711 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=186 2024-11-23T17:10:02,840 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1732381801999.d701f2f82212e8f3b780ec9dae20e678. 2024-11-23T17:10:02,840 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967. 2024-11-23T17:10:02,840 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2902): Flushing d701f2f82212e8f3b780ec9dae20e678 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-23T17:10:02,840 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2902): Flushing 5c659a536d4a93accf1b401f39f6f967 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-23T17:10:02,858 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123fd41ed01bedb494996ef5b8af18f5798_d701f2f82212e8f3b780ec9dae20e678 is 71, key is 0313ca233915f274c972a0b7bc7476aa/cf:q/1732381802647/Put/seqid=0 2024-11-23T17:10:02,859 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024112383e508d3ce0f47dd859575546f42c009_5c659a536d4a93accf1b401f39f6f967 is 71, key is 1252b980da8a3ca70ae7ff78ac5e7ff5/cf:q/1732381802649/Put/seqid=0 2024-11-23T17:10:02,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742263_1439 (size=8172) 2024-11-23T17:10:02,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742263_1439 (size=8172) 2024-11-23T17:10:02,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742263_1439 (size=8172) 2024-11-23T17:10:02,864 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:10:02,868 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024112383e508d3ce0f47dd859575546f42c009_5c659a536d4a93accf1b401f39f6f967 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b2024112383e508d3ce0f47dd859575546f42c009_5c659a536d4a93accf1b401f39f6f967 2024-11-23T17:10:02,869 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportExpiredSnapshot/5c659a536d4a93accf1b401f39f6f967/.tmp/cf/8528eaa103da423aa13583ffe8cb96df, store: [table=testExportExpiredSnapshot family=cf region=5c659a536d4a93accf1b401f39f6f967] 2024-11-23T17:10:02,869 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportExpiredSnapshot/5c659a536d4a93accf1b401f39f6f967/.tmp/cf/8528eaa103da423aa13583ffe8cb96df is 202, key is 101a2004c550a09b28e61a1d1a3b2d707/cf:q/1732381802649/Put/seqid=0 2024-11-23T17:10:02,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742264_1440 (size=5101) 2024-11-23T17:10:02,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742264_1440 (size=5101) 2024-11-23T17:10:02,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742264_1440 (size=5101) 2024-11-23T17:10:02,872 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:10:02,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742265_1441 (size=14663) 2024-11-23T17:10:02,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742265_1441 (size=14663) 2024-11-23T17:10:02,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742265_1441 (size=14663) 2024-11-23T17:10:02,875 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportExpiredSnapshot/5c659a536d4a93accf1b401f39f6f967/.tmp/cf/8528eaa103da423aa13583ffe8cb96df 
2024-11-23T17:10:02,876 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123fd41ed01bedb494996ef5b8af18f5798_d701f2f82212e8f3b780ec9dae20e678 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e20241123fd41ed01bedb494996ef5b8af18f5798_d701f2f82212e8f3b780ec9dae20e678 2024-11-23T17:10:02,877 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportExpiredSnapshot/d701f2f82212e8f3b780ec9dae20e678/.tmp/cf/9ff4b27df15844dd978002c753e82ee9, store: [table=testExportExpiredSnapshot family=cf region=d701f2f82212e8f3b780ec9dae20e678] 2024-11-23T17:10:02,878 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportExpiredSnapshot/d701f2f82212e8f3b780ec9dae20e678/.tmp/cf/9ff4b27df15844dd978002c753e82ee9 is 202, key is 050d398bde1d0a2e97c1c6ec9a8fd3f3a/cf:q/1732381802647/Put/seqid=0 2024-11-23T17:10:02,880 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportExpiredSnapshot/5c659a536d4a93accf1b401f39f6f967/.tmp/cf/8528eaa103da423aa13583ffe8cb96df as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportExpiredSnapshot/5c659a536d4a93accf1b401f39f6f967/cf/8528eaa103da423aa13583ffe8cb96df 2024-11-23T17:10:02,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742266_1442 (size=5888) 2024-11-23T17:10:02,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742266_1442 (size=5888) 2024-11-23T17:10:02,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742266_1442 (size=5888) 2024-11-23T17:10:02,883 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportExpiredSnapshot/d701f2f82212e8f3b780ec9dae20e678/.tmp/cf/9ff4b27df15844dd978002c753e82ee9 2024-11-23T17:10:02,884 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportExpiredSnapshot/5c659a536d4a93accf1b401f39f6f967/cf/8528eaa103da423aa13583ffe8cb96df, entries=47, sequenceid=5, filesize=14.3 K 2024-11-23T17:10:02,885 
INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 5c659a536d4a93accf1b401f39f6f967 in 45ms, sequenceid=5, compaction requested=false 2024-11-23T17:10:02,885 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-11-23T17:10:02,885 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2603): Flush status journal for 5c659a536d4a93accf1b401f39f6f967: 2024-11-23T17:10:02,886 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967. for snapshot-testExportExpiredSnapshot completed. 2024-11-23T17:10:02,886 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967.' region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-11-23T17:10:02,886 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:10:02,886 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportExpiredSnapshot/5c659a536d4a93accf1b401f39f6f967/cf/8528eaa103da423aa13583ffe8cb96df] hfiles 2024-11-23T17:10:02,886 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportExpiredSnapshot/5c659a536d4a93accf1b401f39f6f967/cf/8528eaa103da423aa13583ffe8cb96df for snapshot=snapshot-testExportExpiredSnapshot 2024-11-23T17:10:02,887 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportExpiredSnapshot/d701f2f82212e8f3b780ec9dae20e678/.tmp/cf/9ff4b27df15844dd978002c753e82ee9 as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportExpiredSnapshot/d701f2f82212e8f3b780ec9dae20e678/cf/9ff4b27df15844dd978002c753e82ee9 2024-11-23T17:10:02,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742267_1443 (size=103) 2024-11-23T17:10:02,892 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportExpiredSnapshot/d701f2f82212e8f3b780ec9dae20e678/cf/9ff4b27df15844dd978002c753e82ee9, entries=3, sequenceid=5, filesize=5.8 K 
2024-11-23T17:10:02,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742267_1443 (size=103) 2024-11-23T17:10:02,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742267_1443 (size=103) 2024-11-23T17:10:02,892 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967. 2024-11-23T17:10:02,892 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=187 2024-11-23T17:10:02,893 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for d701f2f82212e8f3b780ec9dae20e678 in 52ms, sequenceid=5, compaction requested=false 2024-11-23T17:10:02,893 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2603): Flush status journal for d701f2f82212e8f3b780ec9dae20e678: 2024-11-23T17:10:02,893 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1732381801999.d701f2f82212e8f3b780ec9dae20e678. for snapshot-testExportExpiredSnapshot completed. 2024-11-23T17:10:02,893 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1732381801999.d701f2f82212e8f3b780ec9dae20e678.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-11-23T17:10:02,893 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:10:02,893 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportExpiredSnapshot/d701f2f82212e8f3b780ec9dae20e678/cf/9ff4b27df15844dd978002c753e82ee9] hfiles 2024-11-23T17:10:02,893 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportExpiredSnapshot/d701f2f82212e8f3b780ec9dae20e678/cf/9ff4b27df15844dd978002c753e82ee9 for snapshot=snapshot-testExportExpiredSnapshot 2024-11-23T17:10:02,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=187 2024-11-23T17:10:02,893 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 5c659a536d4a93accf1b401f39f6f967 2024-11-23T17:10:02,893 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5c659a536d4a93accf1b401f39f6f967 2024-11-23T17:10:02,895 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=187, ppid=185, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5c659a536d4a93accf1b401f39f6f967 in 206 msec 2024-11-23T17:10:02,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742268_1444 (size=103) 2024-11-23T17:10:02,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742268_1444 (size=103) 2024-11-23T17:10:02,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742268_1444 (size=103) 2024-11-23T17:10:02,899 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1732381801999.d701f2f82212e8f3b780ec9dae20e678. 
2024-11-23T17:10:02,899 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=186 2024-11-23T17:10:02,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=186 2024-11-23T17:10:02,899 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region d701f2f82212e8f3b780ec9dae20e678 2024-11-23T17:10:02,899 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d701f2f82212e8f3b780ec9dae20e678 2024-11-23T17:10:02,901 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=186, resume processing ppid=185 2024-11-23T17:10:02,901 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=186, ppid=185, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d701f2f82212e8f3b780ec9dae20e678 in 212 msec 2024-11-23T17:10:02,902 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:10:02,902 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:10:02,903 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-23T17:10:02,903 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-23T17:10:02,903 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:10:02,904 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b2024112383e508d3ce0f47dd859575546f42c009_5c659a536d4a93accf1b401f39f6f967, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e20241123fd41ed01bedb494996ef5b8af18f5798_d701f2f82212e8f3b780ec9dae20e678] hfiles 2024-11-23T17:10:02,904 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b2024112383e508d3ce0f47dd859575546f42c009_5c659a536d4a93accf1b401f39f6f967 2024-11-23T17:10:02,904 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e20241123fd41ed01bedb494996ef5b8af18f5798_d701f2f82212e8f3b780ec9dae20e678 2024-11-23T17:10:02,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742269_1445 (size=287) 2024-11-23T17:10:02,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742269_1445 (size=287) 2024-11-23T17:10:02,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742269_1445 (size=287) 2024-11-23T17:10:02,911 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:10:02,911 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot 2024-11-23T17:10:02,911 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-11-23T17:10:02,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742270_1446 (size=935) 2024-11-23T17:10:02,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742270_1446 (size=935) 2024-11-23T17:10:02,919 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742270_1446 (size=935) 2024-11-23T17:10:02,921 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:10:02,926 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:10:02,927 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-11-23T17:10:02,928 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:10:02,928 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 185 2024-11-23T17:10:02,929 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=185, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 252 msec 2024-11-23T17:10:02,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-11-23T17:10:02,998 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot completed 2024-11-23T17:10:03,179 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
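The snapshot completed at 17:10:02,927 with ttl=10, and the export below is not started until 17:10:13, so slightly more than 10 seconds have elapsed and the snapshot is treated as expired. A minimal sketch of that expiry arithmetic, using timestamps taken from this log (illustrative only, not HBase's actual implementation):

    public class SnapshotTtlCheckSketch {
      // ttl is in seconds, timestamps are epoch millis.
      static boolean isExpired(long creationTimeMs, long ttlSeconds, long nowMs) {
        return ttlSeconds > 0 && nowMs > creationTimeMs + ttlSeconds * 1000L;
      }

      public static void main(String[] args) {
        long created = 1732381802927L; // snapshot completed ~17:10:02,927
        long now     = 1732381813037L; // export verification at 17:10:13,037
        System.out.println(isExpired(created, 10L, now)); // true: ~10.1 s elapsed > 10 s TTL
      }
    }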
2024-11-23T17:10:03,873 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0007_000001 (auth:SIMPLE) from 127.0.0.1:49392 2024-11-23T17:10:03,885 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_1/usercache/jenkins/appcache/application_1732381541272_0007/container_1732381541272_0007_01_000001/launch_container.sh] 2024-11-23T17:10:03,885 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_1/usercache/jenkins/appcache/application_1732381541272_0007/container_1732381541272_0007_01_000001/container_tokens] 2024-11-23T17:10:03,885 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_1/usercache/jenkins/appcache/application_1732381541272_0007/container_1732381541272_0007_01_000001/sysfs] 2024-11-23T17:10:04,340 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-11-23T17:10:04,340 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-11-23T17:10:04,341 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-11-23T17:10:04,341 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-11-23T17:10:04,341 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:10:04,342 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:10:05,415 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:10:13,005 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381813005 2024-11-23T17:10:13,005 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:35431, tgtDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381813005, 
rawTgtDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381813005, srcFsUri=hdfs://localhost:35431, srcDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33
2024-11-23T17:10:13,035 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:35431, inputRoot=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33
2024-11-23T17:10:13,035 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1293158403_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381813005, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381813005/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot
2024-11-23T17:10:13,037 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity.
2024-11-23T17:10:13,038 ERROR [Time-limited test {}] util.AbstractHBaseTool(152): Error running command-line tool
org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired.
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:960) ~[classes/:?]
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1105) ~[classes/:?]
    at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:362) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
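As the stack trace shows, TestExportSnapshot drives the export through ToolRunner and org.apache.hadoop.hbase.snapshot.ExportSnapshot, and the tool fails during the up-front verification step because the snapshot's TTL has already expired. A sketch of the equivalent standalone invocation follows; the -snapshot and -copy-to options are the standard ExportSnapshot flags, and the destination URI is the one reported in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // CLI equivalent:
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //     -snapshot snapshot-testExportExpiredSnapshot -copy-to <target URI>
        int exitCode = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snapshot-testExportExpiredSnapshot",
            "-copy-to",
            "hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381813005"
        });
        // For an expired snapshot, AbstractHBaseTool logs the error seen above and
        // the tool returns a non-zero exit code instead of copying any data.
        System.exit(exitCode);
      }
    }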
2024-11-23T17:10:13,039 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportExpiredSnapshot 2024-11-23T17:10:13,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=188, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-23T17:10:13,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-11-23T17:10:13,043 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381813043"}]},"ts":"1732381813043"} 2024-11-23T17:10:13,045 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta 2024-11-23T17:10:13,045 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING 2024-11-23T17:10:13,046 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=189, ppid=188, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}] 2024-11-23T17:10:13,047 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bae5f9b5ab6880e448eddfffe03e1e3e, UNASSIGN}, {pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b01b4a0d5a04a2232b0b7ceae0e52b7b, UNASSIGN}] 2024-11-23T17:10:13,048 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b01b4a0d5a04a2232b0b7ceae0e52b7b, UNASSIGN 2024-11-23T17:10:13,048 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bae5f9b5ab6880e448eddfffe03e1e3e, UNASSIGN 2024-11-23T17:10:13,049 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=190 updating hbase:meta row=bae5f9b5ab6880e448eddfffe03e1e3e, regionState=CLOSING, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:10:13,049 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=191 updating hbase:meta row=b01b4a0d5a04a2232b0b7ceae0e52b7b, regionState=CLOSING, regionLocation=1a7c5470b472,43453,1732381534825 2024-11-23T17:10:13,050 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b01b4a0d5a04a2232b0b7ceae0e52b7b, UNASSIGN because future has completed 2024-11-23T17:10:13,051 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:10:13,051 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=192, ppid=191, state=RUNNABLE, hasLock=false; CloseRegionProcedure b01b4a0d5a04a2232b0b7ceae0e52b7b, server=1a7c5470b472,43453,1732381534825}] 2024-11-23T17:10:13,051 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bae5f9b5ab6880e448eddfffe03e1e3e, UNASSIGN because future has completed 2024-11-23T17:10:13,052 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:10:13,052 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=193, ppid=190, state=RUNNABLE, hasLock=false; CloseRegionProcedure bae5f9b5ab6880e448eddfffe03e1e3e, server=1a7c5470b472,37875,1732381534675}] 2024-11-23T17:10:13,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-11-23T17:10:13,203 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(122): Close b01b4a0d5a04a2232b0b7ceae0e52b7b 2024-11-23T17:10:13,203 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:10:13,204 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1722): Closing b01b4a0d5a04a2232b0b7ceae0e52b7b, disabling compactions & flushes 2024-11-23T17:10:13,204 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1732381800661.b01b4a0d5a04a2232b0b7ceae0e52b7b. 2024-11-23T17:10:13,204 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1732381800661.b01b4a0d5a04a2232b0b7ceae0e52b7b. 2024-11-23T17:10:13,204 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1732381800661.b01b4a0d5a04a2232b0b7ceae0e52b7b. after waiting 0 ms 2024-11-23T17:10:13,204 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1732381800661.b01b4a0d5a04a2232b0b7ceae0e52b7b. 
2024-11-23T17:10:13,204 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(122): Close bae5f9b5ab6880e448eddfffe03e1e3e 2024-11-23T17:10:13,204 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:10:13,204 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1722): Closing bae5f9b5ab6880e448eddfffe03e1e3e, disabling compactions & flushes 2024-11-23T17:10:13,204 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1732381800661.bae5f9b5ab6880e448eddfffe03e1e3e. 2024-11-23T17:10:13,204 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1732381800661.bae5f9b5ab6880e448eddfffe03e1e3e. 2024-11-23T17:10:13,204 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1732381800661.bae5f9b5ab6880e448eddfffe03e1e3e. after waiting 0 ms 2024-11-23T17:10:13,204 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1732381800661.bae5f9b5ab6880e448eddfffe03e1e3e. 2024-11-23T17:10:13,207 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/bae5f9b5ab6880e448eddfffe03e1e3e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:10:13,207 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/b01b4a0d5a04a2232b0b7ceae0e52b7b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:10:13,208 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:10:13,208 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:10:13,208 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1732381800661.bae5f9b5ab6880e448eddfffe03e1e3e. 2024-11-23T17:10:13,208 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1732381800661.b01b4a0d5a04a2232b0b7ceae0e52b7b. 
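The remaining entries are the test's cleanup of testtb-testExportExpiredSnapshot: the DisableTableProcedure (pid=188) closes both regions, and the DeleteTableProcedure (pid=194) then archives the region directories and clears the table's ACL entries. On the client side this corresponds to the usual Admin calls, roughly as sketched here:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public class DropTestTableSketch {
      // Cleanup sketch: disable, then delete, the test table. The snapshot itself
      // would be removed separately (e.g. admin.deleteSnapshot(...)).
      static void dropTable(Admin admin) throws Exception {
        TableName tn = TableName.valueOf("testtb-testExportExpiredSnapshot");
        if (admin.tableExists(tn)) {
          admin.disableTable(tn); // DisableTableProcedure, as in the entries above
          admin.deleteTable(tn);  // DeleteTableProcedure / HFileArchiver, as in the entries below
        }
      }
    }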
2024-11-23T17:10:13,208 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1676): Region close journal for bae5f9b5ab6880e448eddfffe03e1e3e: Waiting for close lock at 1732381813204Running coprocessor pre-close hooks at 1732381813204Disabling compacts and flushes for region at 1732381813204Disabling writes for close at 1732381813204Writing region close event to WAL at 1732381813205 (+1 ms)Running coprocessor post-close hooks at 1732381813208 (+3 ms)Closed at 1732381813208 2024-11-23T17:10:13,208 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1676): Region close journal for b01b4a0d5a04a2232b0b7ceae0e52b7b: Waiting for close lock at 1732381813203Running coprocessor pre-close hooks at 1732381813203Disabling compacts and flushes for region at 1732381813204 (+1 ms)Disabling writes for close at 1732381813204Writing region close event to WAL at 1732381813204Running coprocessor post-close hooks at 1732381813208 (+4 ms)Closed at 1732381813208 2024-11-23T17:10:13,210 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(157): Closed bae5f9b5ab6880e448eddfffe03e1e3e 2024-11-23T17:10:13,210 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=190 updating hbase:meta row=bae5f9b5ab6880e448eddfffe03e1e3e, regionState=CLOSED 2024-11-23T17:10:13,210 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(157): Closed b01b4a0d5a04a2232b0b7ceae0e52b7b 2024-11-23T17:10:13,211 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=191 updating hbase:meta row=b01b4a0d5a04a2232b0b7ceae0e52b7b, regionState=CLOSED 2024-11-23T17:10:13,212 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=193, ppid=190, state=RUNNABLE, hasLock=false; CloseRegionProcedure bae5f9b5ab6880e448eddfffe03e1e3e, server=1a7c5470b472,37875,1732381534675 because future has completed 2024-11-23T17:10:13,213 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=192, ppid=191, state=RUNNABLE, hasLock=false; CloseRegionProcedure b01b4a0d5a04a2232b0b7ceae0e52b7b, server=1a7c5470b472,43453,1732381534825 because future has completed 2024-11-23T17:10:13,214 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=193, resume processing ppid=190 2024-11-23T17:10:13,215 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=193, ppid=190, state=SUCCESS, hasLock=false; CloseRegionProcedure bae5f9b5ab6880e448eddfffe03e1e3e, server=1a7c5470b472,37875,1732381534675 in 161 msec 2024-11-23T17:10:13,216 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=192, resume processing ppid=191 2024-11-23T17:10:13,216 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=190, ppid=189, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bae5f9b5ab6880e448eddfffe03e1e3e, UNASSIGN in 168 msec 2024-11-23T17:10:13,216 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=192, ppid=191, state=SUCCESS, hasLock=false; CloseRegionProcedure b01b4a0d5a04a2232b0b7ceae0e52b7b, server=1a7c5470b472,43453,1732381534825 in 163 msec 2024-11-23T17:10:13,217 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): 
Finished subprocedure pid=191, resume processing ppid=189 2024-11-23T17:10:13,217 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=191, ppid=189, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=b01b4a0d5a04a2232b0b7ceae0e52b7b, UNASSIGN in 169 msec 2024-11-23T17:10:13,219 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=189, resume processing ppid=188 2024-11-23T17:10:13,219 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=189, ppid=188, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 172 msec 2024-11-23T17:10:13,220 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381813220"}]},"ts":"1732381813220"} 2024-11-23T17:10:13,222 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-11-23T17:10:13,222 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to state=DISABLED 2024-11-23T17:10:13,224 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=188, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot in 183 msec 2024-11-23T17:10:13,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-11-23T17:10:13,358 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-23T17:10:13,358 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportExpiredSnapshot 2024-11-23T17:10:13,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=194, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-23T17:10:13,360 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=194, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-23T17:10:13,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-11-23T17:10:13,360 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=194, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-23T17:10:13,362 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37875 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportExpiredSnapshot 2024-11-23T17:10:13,364 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/bae5f9b5ab6880e448eddfffe03e1e3e 2024-11-23T17:10:13,364 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/b01b4a0d5a04a2232b0b7ceae0e52b7b 2024-11-23T17:10:13,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-23T17:10:13,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-23T17:10:13,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-23T17:10:13,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-23T17:10:13,367 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/b01b4a0d5a04a2232b0b7ceae0e52b7b/cf, FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/b01b4a0d5a04a2232b0b7ceae0e52b7b/recovered.edits] 2024-11-23T17:10:13,367 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/bae5f9b5ab6880e448eddfffe03e1e3e/cf, FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/bae5f9b5ab6880e448eddfffe03e1e3e/recovered.edits] 2024-11-23T17:10:13,367 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-23T17:10:13,367 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-23T17:10:13,367 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-23T17:10:13,367 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-23T17:10:13,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-23T17:10:13,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-23T17:10:13,368 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-23T17:10:13,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:10:13,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:10:13,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:10:13,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-23T17:10:13,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:10:13,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=194 2024-11-23T17:10:13,370 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:13,370 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:13,370 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:13,370 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:13,372 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/b01b4a0d5a04a2232b0b7ceae0e52b7b/cf/3ce7d56a76f744b78497b2e8eafd9843 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportExpiredSnapshot/b01b4a0d5a04a2232b0b7ceae0e52b7b/cf/3ce7d56a76f744b78497b2e8eafd9843 2024-11-23T17:10:13,372 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/bae5f9b5ab6880e448eddfffe03e1e3e/cf/1250447bceda4404be2413b8c6ae5c3c to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportExpiredSnapshot/bae5f9b5ab6880e448eddfffe03e1e3e/cf/1250447bceda4404be2413b8c6ae5c3c 2024-11-23T17:10:13,374 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/bae5f9b5ab6880e448eddfffe03e1e3e/recovered.edits/9.seqid to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportExpiredSnapshot/bae5f9b5ab6880e448eddfffe03e1e3e/recovered.edits/9.seqid 2024-11-23T17:10:13,374 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/b01b4a0d5a04a2232b0b7ceae0e52b7b/recovered.edits/9.seqid to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportExpiredSnapshot/b01b4a0d5a04a2232b0b7ceae0e52b7b/recovered.edits/9.seqid 2024-11-23T17:10:13,374 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/bae5f9b5ab6880e448eddfffe03e1e3e 2024-11-23T17:10:13,374 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportExpiredSnapshot/b01b4a0d5a04a2232b0b7ceae0e52b7b 2024-11-23T17:10:13,374 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-11-23T17:10:13,375 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f 2024-11-23T17:10:13,375 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf] 2024-11-23T17:10:13,380 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b2024112314e20be28eb74beb8691eec68d1e1a4d_b01b4a0d5a04a2232b0b7ceae0e52b7b to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b2024112314e20be28eb74beb8691eec68d1e1a4d_b01b4a0d5a04a2232b0b7ceae0e52b7b 2024-11-23T17:10:13,381 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e202411238ed8ed776cea487ca09a8675b75bf7aa_bae5f9b5ab6880e448eddfffe03e1e3e to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e202411238ed8ed776cea487ca09a8675b75bf7aa_bae5f9b5ab6880e448eddfffe03e1e3e 2024-11-23T17:10:13,381 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f 2024-11-23T17:10:13,383 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=194, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-23T17:10:13,385 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-11-23T17:10:13,388 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportExpiredSnapshot' descriptor. 2024-11-23T17:10:13,389 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=194, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-23T17:10:13,389 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportExpiredSnapshot' from region states. 2024-11-23T17:10:13,389 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1732381800661.bae5f9b5ab6880e448eddfffe03e1e3e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732381813389"}]},"ts":"9223372036854775807"} 2024-11-23T17:10:13,389 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1732381800661.b01b4a0d5a04a2232b0b7ceae0e52b7b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732381813389"}]},"ts":"9223372036854775807"} 2024-11-23T17:10:13,391 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-23T17:10:13,391 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => bae5f9b5ab6880e448eddfffe03e1e3e, NAME => 'testtb-testExportExpiredSnapshot,,1732381800661.bae5f9b5ab6880e448eddfffe03e1e3e.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => b01b4a0d5a04a2232b0b7ceae0e52b7b, NAME => 'testtb-testExportExpiredSnapshot,1,1732381800661.b01b4a0d5a04a2232b0b7ceae0e52b7b.', STARTKEY => '1', ENDKEY => ''}] 2024-11-23T17:10:13,391 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportExpiredSnapshot' as deleted. 
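The disable/unassign/delete sequence recorded above is driven entirely by master-side procedures (DisableTableProcedure, CloseTableRegionsProcedure, DeleteTableProcedure); the client only issues the two admin RPCs that start them. A minimal illustrative sketch of that client side, assuming the standard HBase 2.x Admin API (not taken from the test source):

    // Illustrative sketch (assumed standard HBase 2.x client API, not the actual test code).
    // Disabling and then deleting a table is what kicks off the DisableTableProcedure /
    // DeleteTableProcedure chains whose entries appear above; HFile archiving and
    // hbase:meta cleanup happen server-side.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class DropTableSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          if (admin.tableExists(table)) {
            admin.disableTable(table);  // regions are closed and UNASSIGNed by the master
            admin.deleteTable(table);   // region dirs are archived, rows removed from hbase:meta
          }
        }
      }
    }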
2024-11-23T17:10:13,391 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732381813391"}]},"ts":"9223372036854775807"} 2024-11-23T17:10:13,392 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportExpiredSnapshot state from META 2024-11-23T17:10:13,393 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=194, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-23T17:10:13,394 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=194, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 35 msec 2024-11-23T17:10:13,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=194 2024-11-23T17:10:13,478 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportExpiredSnapshot 2024-11-23T17:10:13,478 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-23T17:10:13,485 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportExpiredSnapshot" type: DISABLED 2024-11-23T17:10:13,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-11-23T17:10:13,487 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportExpiredSnapshot" type: DISABLED 2024-11-23T17:10:13,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-11-23T17:10:13,490 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportExpiredSnapshot" type: DISABLED 2024-11-23T17:10:13,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportExpiredSnapshot 2024-11-23T17:10:13,510 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportExpiredSnapshot Thread=811 (was 817), OpenFileDescriptor=797 (was 822), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=518 (was 574), ProcessCount=14 (was 14), AvailableMemoryMB=2916 (was 2855) - AvailableMemoryMB LEAK? 
- 2024-11-23T17:10:13,511 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=811 is superior to 500 2024-11-23T17:10:13,527 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testEmptyExportFileSystemState Thread=811, OpenFileDescriptor=797, MaxFileDescriptor=1048576, SystemLoadAverage=518, ProcessCount=14, AvailableMemoryMB=2915 2024-11-23T17:10:13,527 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=811 is superior to 500 2024-11-23T17:10:13,529 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T17:10:13,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=195, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-23T17:10:13,531 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T17:10:13,531 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 195 2024-11-23T17:10:13,531 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T17:10:13,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-11-23T17:10:13,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742271_1447 (size=448) 2024-11-23T17:10:13,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742271_1447 (size=448) 2024-11-23T17:10:13,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742271_1447 (size=448) 2024-11-23T17:10:13,541 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4310449f69a358a61c60755e5836a16b, NAME => 'testtb-testEmptyExportFileSystemState,,1732381813528.4310449f69a358a61c60755e5836a16b.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', 
MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:10:13,541 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 6239371d130b8bd7fbaeb3d576406c93, NAME => 'testtb-testEmptyExportFileSystemState,1,1732381813528.6239371d130b8bd7fbaeb3d576406c93.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:10:13,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742272_1448 (size=73) 2024-11-23T17:10:13,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742272_1448 (size=73) 2024-11-23T17:10:13,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742273_1449 (size=73) 2024-11-23T17:10:13,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742273_1449 (size=73) 2024-11-23T17:10:13,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742272_1448 (size=73) 2024-11-23T17:10:13,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742273_1449 (size=73) 2024-11-23T17:10:13,548 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1732381813528.4310449f69a358a61c60755e5836a16b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:10:13,548 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing 4310449f69a358a61c60755e5836a16b, disabling compactions & flushes 2024-11-23T17:10:13,548 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1732381813528.4310449f69a358a61c60755e5836a16b. 
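The table being created above declares a single MOB-enabled column family ('cf', IS_MOB => 'true', MOB_THRESHOLD => '0'), so every cell written to it is stored as a MOB file. A rough client-side equivalent, assuming the HBase 2.x TableDescriptorBuilder/ColumnFamilyDescriptorBuilder API (illustrative only, not the test code):

    // Illustrative sketch (assumed HBase 2.x descriptor builders; not the actual test code).
    // Declares the same kind of MOB-enabled schema seen in the log and creates the table
    // pre-split at '1', giving the two regions ('' -> '1', '1' -> '') that appear above.
    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    final class CreateMobTableSketch {
      static void create(Admin admin) throws IOException {
        TableDescriptorBuilder table =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("testtb-testEmptyExportFileSystemState"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                    .setMobEnabled(true)   // IS_MOB => 'true'
                    .setMobThreshold(0L)   // MOB_THRESHOLD => '0': every cell goes to a MOB file
                    .setMaxVersions(1)     // VERSIONS => '1'
                    .build());
        admin.createTable(table.build(), new byte[][] { Bytes.toBytes("1") }); // split key '1' => 2 regions
      }
    }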
2024-11-23T17:10:13,548 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1732381813528.6239371d130b8bd7fbaeb3d576406c93.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:10:13,548 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1732381813528.4310449f69a358a61c60755e5836a16b. 2024-11-23T17:10:13,548 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing 6239371d130b8bd7fbaeb3d576406c93, disabling compactions & flushes 2024-11-23T17:10:13,548 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1732381813528.4310449f69a358a61c60755e5836a16b. after waiting 0 ms 2024-11-23T17:10:13,548 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1732381813528.4310449f69a358a61c60755e5836a16b. 2024-11-23T17:10:13,548 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1732381813528.6239371d130b8bd7fbaeb3d576406c93. 2024-11-23T17:10:13,548 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1732381813528.6239371d130b8bd7fbaeb3d576406c93. 2024-11-23T17:10:13,548 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1732381813528.4310449f69a358a61c60755e5836a16b. 2024-11-23T17:10:13,548 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1732381813528.6239371d130b8bd7fbaeb3d576406c93. after waiting 0 ms 2024-11-23T17:10:13,548 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1732381813528.6239371d130b8bd7fbaeb3d576406c93. 2024-11-23T17:10:13,548 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4310449f69a358a61c60755e5836a16b: Waiting for close lock at 1732381813548Disabling compacts and flushes for region at 1732381813548Disabling writes for close at 1732381813548Writing region close event to WAL at 1732381813548Closed at 1732381813548 2024-11-23T17:10:13,548 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1732381813528.6239371d130b8bd7fbaeb3d576406c93. 
2024-11-23T17:10:13,548 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for 6239371d130b8bd7fbaeb3d576406c93: Waiting for close lock at 1732381813548Disabling compacts and flushes for region at 1732381813548Disabling writes for close at 1732381813548Writing region close event to WAL at 1732381813548Closed at 1732381813548 2024-11-23T17:10:13,549 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T17:10:13,550 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1732381813528.4310449f69a358a61c60755e5836a16b.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1732381813550"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732381813550"}]},"ts":"1732381813550"} 2024-11-23T17:10:13,550 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1732381813528.6239371d130b8bd7fbaeb3d576406c93.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1732381813550"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732381813550"}]},"ts":"1732381813550"} 2024-11-23T17:10:13,552 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-23T17:10:13,553 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T17:10:13,553 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381813553"}]},"ts":"1732381813553"} 2024-11-23T17:10:13,554 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-11-23T17:10:13,555 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {1a7c5470b472=0} racks are {/default-rack=0} 2024-11-23T17:10:13,555 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T17:10:13,555 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T17:10:13,555 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T17:10:13,556 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T17:10:13,556 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T17:10:13,556 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T17:10:13,556 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T17:10:13,556 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T17:10:13,556 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T17:10:13,556 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T17:10:13,556 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=196, ppid=195, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=4310449f69a358a61c60755e5836a16b, ASSIGN}, {pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=6239371d130b8bd7fbaeb3d576406c93, ASSIGN}] 2024-11-23T17:10:13,557 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=6239371d130b8bd7fbaeb3d576406c93, ASSIGN 2024-11-23T17:10:13,557 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=4310449f69a358a61c60755e5836a16b, ASSIGN 2024-11-23T17:10:13,557 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=4310449f69a358a61c60755e5836a16b, ASSIGN; state=OFFLINE, location=1a7c5470b472,44711,1732381534872; forceNewPlan=false, retain=false 2024-11-23T17:10:13,557 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=6239371d130b8bd7fbaeb3d576406c93, ASSIGN; state=OFFLINE, location=1a7c5470b472,37875,1732381534675; forceNewPlan=false, retain=false 2024-11-23T17:10:13,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-11-23T17:10:13,708 INFO [1a7c5470b472:46159 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
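While the master assigns the new regions, the client keeps polling the master ("Checking to see if procedure is done pid=195"). With the asynchronous admin calls that polling is hidden behind a Future; a hedged sketch, assuming the HBase 2.x Admin#createTableAsync signature (not the test code):

    // Illustrative sketch (assumed Admin#createTableAsync from HBase 2.x; not the test code).
    // The returned Future completes once CreateTableProcedure (pid=195 in the log) finishes,
    // which is what the repeated "Checking to see if procedure is done" RPCs correspond to.
    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    final class AsyncCreateSketch {
      static void createAndWait(Admin admin, TableDescriptor desc, byte[][] splits) throws Exception {
        Future<Void> future = admin.createTableAsync(desc, splits);
        future.get(60, TimeUnit.SECONDS); // blocks until the master reports the procedure as done
        System.out.println("table available: " + admin.isTableAvailable(desc.getTableName()));
      }
    }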
2024-11-23T17:10:13,708 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=196 updating hbase:meta row=4310449f69a358a61c60755e5836a16b, regionState=OPENING, regionLocation=1a7c5470b472,44711,1732381534872 2024-11-23T17:10:13,708 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=197 updating hbase:meta row=6239371d130b8bd7fbaeb3d576406c93, regionState=OPENING, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:10:13,710 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=6239371d130b8bd7fbaeb3d576406c93, ASSIGN because future has completed 2024-11-23T17:10:13,710 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=198, ppid=197, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6239371d130b8bd7fbaeb3d576406c93, server=1a7c5470b472,37875,1732381534675}] 2024-11-23T17:10:13,711 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=4310449f69a358a61c60755e5836a16b, ASSIGN because future has completed 2024-11-23T17:10:13,711 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=199, ppid=196, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4310449f69a358a61c60755e5836a16b, server=1a7c5470b472,44711,1732381534872}] 2024-11-23T17:10:13,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-11-23T17:10:13,865 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,1,1732381813528.6239371d130b8bd7fbaeb3d576406c93. 2024-11-23T17:10:13,865 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7752): Opening region: {ENCODED => 6239371d130b8bd7fbaeb3d576406c93, NAME => 'testtb-testEmptyExportFileSystemState,1,1732381813528.6239371d130b8bd7fbaeb3d576406c93.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T17:10:13,865 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1732381813528.6239371d130b8bd7fbaeb3d576406c93. service=AccessControlService 2024-11-23T17:10:13,866 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,,1732381813528.4310449f69a358a61c60755e5836a16b. 2024-11-23T17:10:13,866 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T17:10:13,866 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7752): Opening region: {ENCODED => 4310449f69a358a61c60755e5836a16b, NAME => 'testtb-testEmptyExportFileSystemState,,1732381813528.4310449f69a358a61c60755e5836a16b.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T17:10:13,866 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 6239371d130b8bd7fbaeb3d576406c93 2024-11-23T17:10:13,866 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1732381813528.6239371d130b8bd7fbaeb3d576406c93.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:10:13,866 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1732381813528.4310449f69a358a61c60755e5836a16b. service=AccessControlService 2024-11-23T17:10:13,866 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7794): checking encryption for 6239371d130b8bd7fbaeb3d576406c93 2024-11-23T17:10:13,866 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7797): checking classloading for 6239371d130b8bd7fbaeb3d576406c93 2024-11-23T17:10:13,866 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T17:10:13,866 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 4310449f69a358a61c60755e5836a16b 2024-11-23T17:10:13,866 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1732381813528.4310449f69a358a61c60755e5836a16b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:10:13,866 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7794): checking encryption for 4310449f69a358a61c60755e5836a16b 2024-11-23T17:10:13,866 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7797): checking classloading for 4310449f69a358a61c60755e5836a16b 2024-11-23T17:10:13,867 INFO [StoreOpener-6239371d130b8bd7fbaeb3d576406c93-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6239371d130b8bd7fbaeb3d576406c93 2024-11-23T17:10:13,867 INFO [StoreOpener-4310449f69a358a61c60755e5836a16b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4310449f69a358a61c60755e5836a16b 2024-11-23T17:10:13,868 INFO [StoreOpener-6239371d130b8bd7fbaeb3d576406c93-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6239371d130b8bd7fbaeb3d576406c93 columnFamilyName cf 2024-11-23T17:10:13,868 INFO [StoreOpener-4310449f69a358a61c60755e5836a16b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4310449f69a358a61c60755e5836a16b columnFamilyName cf 2024-11-23T17:10:13,869 DEBUG [StoreOpener-6239371d130b8bd7fbaeb3d576406c93-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:10:13,869 DEBUG [StoreOpener-4310449f69a358a61c60755e5836a16b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:10:13,869 INFO [StoreOpener-4310449f69a358a61c60755e5836a16b-1 {}] regionserver.HStore(327): Store=4310449f69a358a61c60755e5836a16b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:10:13,869 INFO [StoreOpener-6239371d130b8bd7fbaeb3d576406c93-1 {}] regionserver.HStore(327): Store=6239371d130b8bd7fbaeb3d576406c93/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:10:13,870 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1038): replaying wal for 4310449f69a358a61c60755e5836a16b 2024-11-23T17:10:13,870 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1038): replaying wal for 6239371d130b8bd7fbaeb3d576406c93 2024-11-23T17:10:13,870 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/6239371d130b8bd7fbaeb3d576406c93 2024-11-23T17:10:13,870 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/4310449f69a358a61c60755e5836a16b 2024-11-23T17:10:13,871 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/4310449f69a358a61c60755e5836a16b 2024-11-23T17:10:13,871 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/6239371d130b8bd7fbaeb3d576406c93 2024-11-23T17:10:13,871 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1048): stopping wal replay for 6239371d130b8bd7fbaeb3d576406c93 2024-11-23T17:10:13,871 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1048): stopping wal replay for 4310449f69a358a61c60755e5836a16b 2024-11-23T17:10:13,871 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1060): Cleaning up temporary data for 6239371d130b8bd7fbaeb3d576406c93 2024-11-23T17:10:13,871 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1060): Cleaning up temporary data for 4310449f69a358a61c60755e5836a16b 2024-11-23T17:10:13,872 DEBUG 
[RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1093): writing seq id for 4310449f69a358a61c60755e5836a16b 2024-11-23T17:10:13,872 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1093): writing seq id for 6239371d130b8bd7fbaeb3d576406c93 2024-11-23T17:10:13,874 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/6239371d130b8bd7fbaeb3d576406c93/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:10:13,874 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/4310449f69a358a61c60755e5836a16b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:10:13,874 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1114): Opened 6239371d130b8bd7fbaeb3d576406c93; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67945462, jitterRate=0.012466281652450562}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:10:13,874 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6239371d130b8bd7fbaeb3d576406c93 2024-11-23T17:10:13,874 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1114): Opened 4310449f69a358a61c60755e5836a16b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61882023, jitterRate=-0.07788600027561188}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:10:13,874 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4310449f69a358a61c60755e5836a16b 2024-11-23T17:10:13,875 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1006): Region open journal for 4310449f69a358a61c60755e5836a16b: Running coprocessor pre-open hook at 1732381813866Writing region info on filesystem at 1732381813866Initializing all the Stores at 1732381813867 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381813867Cleaning up temporary data from old regions at 1732381813871 (+4 ms)Running coprocessor post-open hooks at 1732381813874 (+3 ms)Region opened successfully at 1732381813875 (+1 ms) 2024-11-23T17:10:13,875 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1006): Region open journal for 6239371d130b8bd7fbaeb3d576406c93: Running 
coprocessor pre-open hook at 1732381813866Writing region info on filesystem at 1732381813866Initializing all the Stores at 1732381813867 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381813867Cleaning up temporary data from old regions at 1732381813871 (+4 ms)Running coprocessor post-open hooks at 1732381813874 (+3 ms)Region opened successfully at 1732381813875 (+1 ms) 2024-11-23T17:10:13,875 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1732381813528.4310449f69a358a61c60755e5836a16b., pid=199, masterSystemTime=1732381813863 2024-11-23T17:10:13,875 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1732381813528.6239371d130b8bd7fbaeb3d576406c93., pid=198, masterSystemTime=1732381813862 2024-11-23T17:10:13,877 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1732381813528.4310449f69a358a61c60755e5836a16b. 2024-11-23T17:10:13,877 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,,1732381813528.4310449f69a358a61c60755e5836a16b. 2024-11-23T17:10:13,877 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=196 updating hbase:meta row=4310449f69a358a61c60755e5836a16b, regionState=OPEN, openSeqNum=2, regionLocation=1a7c5470b472,44711,1732381534872 2024-11-23T17:10:13,878 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1732381813528.6239371d130b8bd7fbaeb3d576406c93. 2024-11-23T17:10:13,878 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,1,1732381813528.6239371d130b8bd7fbaeb3d576406c93. 
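At this point both regions are open and their locations have been reported back for hbase:meta. A client can confirm the resulting layout with a RegionLocator; a small sketch under the same assumption of the standard 2.x API (class and method names here are illustrative):

    // Illustrative sketch (assumed standard HBase 2.x client API; not the test code).
    // Lists the regions of the newly created table together with the region servers
    // hosting them, mirroring the OPEN states recorded above.
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    final class RegionLayoutSketch {
      static void printLayout(Connection conn) throws java.io.IOException {
        TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
        try (RegionLocator locator = conn.getRegionLocator(table)) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }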
2024-11-23T17:10:13,878 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=197 updating hbase:meta row=6239371d130b8bd7fbaeb3d576406c93, regionState=OPEN, openSeqNum=2, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:10:13,879 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=199, ppid=196, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4310449f69a358a61c60755e5836a16b, server=1a7c5470b472,44711,1732381534872 because future has completed 2024-11-23T17:10:13,880 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=198, ppid=197, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6239371d130b8bd7fbaeb3d576406c93, server=1a7c5470b472,37875,1732381534675 because future has completed 2024-11-23T17:10:13,882 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=199, resume processing ppid=196 2024-11-23T17:10:13,882 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=199, ppid=196, state=SUCCESS, hasLock=false; OpenRegionProcedure 4310449f69a358a61c60755e5836a16b, server=1a7c5470b472,44711,1732381534872 in 169 msec 2024-11-23T17:10:13,883 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=196, ppid=195, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=4310449f69a358a61c60755e5836a16b, ASSIGN in 326 msec 2024-11-23T17:10:13,883 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=198, resume processing ppid=197 2024-11-23T17:10:13,883 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=198, ppid=197, state=SUCCESS, hasLock=false; OpenRegionProcedure 6239371d130b8bd7fbaeb3d576406c93, server=1a7c5470b472,37875,1732381534675 in 171 msec 2024-11-23T17:10:13,885 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=197, resume processing ppid=195 2024-11-23T17:10:13,885 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=197, ppid=195, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=6239371d130b8bd7fbaeb3d576406c93, ASSIGN in 327 msec 2024-11-23T17:10:13,886 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T17:10:13,886 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381813886"}]},"ts":"1732381813886"} 2024-11-23T17:10:13,888 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-11-23T17:10:13,888 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T17:10:13,889 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-11-23T17:10:13,891 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37875 {}] access.PermissionStorage(613): Read acl: 
entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-23T17:10:13,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:10:13,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:10:13,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:10:13,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:10:13,897 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:13,897 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:13,897 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:13,897 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:13,897 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:13,897 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:13,897 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:13,897 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:13,898 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=195, 
state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 368 msec 2024-11-23T17:10:14,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-11-23T17:10:14,158 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-23T17:10:14,158 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T17:10:14,161 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-11-23T17:10:14,161 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1732381813528.4310449f69a358a61c60755e5836a16b. 2024-11-23T17:10:14,161 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:10:14,163 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T17:10:14,167 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T17:10:14,171 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T17:10:14,172 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-23T17:10:14,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732381814172 (current time:1732381814172). 
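The snapshot request logged just above ({ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }) is the kind of call the Admin client issues for a flush-type snapshot. A minimal sketch, assuming a standard HBase 2.x/3.x client on the classpath rather than the test's own helper code:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotRequestSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // A FLUSH-type snapshot, matching "type=FLUSH ttl=0" in the request above.
      // The call blocks until the master's SnapshotProcedure finishes.
      admin.snapshot("emptySnaptb0-testEmptyExportFileSystemState",
          TableName.valueOf("testtb-testEmptyExportFileSystemState"),
          SnapshotType.FLUSH);
    }
  }
}
```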
2024-11-23T17:10:14,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:10:14,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-11-23T17:10:14,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:10:14,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a23de13, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:14,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:10:14,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:10:14,174 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:10:14,174 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:10:14,174 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:10:14,175 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@677ce4e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:14,175 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:10:14,175 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:10:14,175 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:14,176 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56570, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:10:14,176 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50dfaea9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:14,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:10:14,177 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:10:14,177 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:10:14,178 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43386, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:10:14,179 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159. 2024-11-23T17:10:14,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:10:14,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:14,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:14,179 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
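In the call stack just above, SnapshotDescriptionUtils.isSecurityAvailable checks that the ACL table is live before the table's permissions (the "jenkins: RWXCA" entries read earlier from hbase:acl) are copied into the snapshot description. An entry like that is normally produced by a table-level grant; a hedged sketch using the public AccessControlClient API, with the user name taken from the log:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantSketch {
  // AccessControlClient.grant declares "throws Throwable".
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // RWXCA = READ, WRITE, EXEC, CREATE, ADMIN, i.e. the "jenkins: RWXCA"
      // entry read back from hbase:acl above. Null family/qualifier grants
      // at table scope.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testEmptyExportFileSystemState"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}
```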
2024-11-23T17:10:14,179 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-11-23T17:10:14,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32122943, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:14,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:10:14,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:10:14,180 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:10:14,181 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:10:14,181 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:10:14,181 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3abc576f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:14,181 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:10:14,181 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:10:14,181 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:14,182 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56576, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:10:14,182 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@575f62f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:14,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:10:14,183 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:10:14,183 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:10:14,184 INFO 
[MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43396, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:10:14,186 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce., hostname=1a7c5470b472,37875,1732381534675, seqNum=2] 2024-11-23T17:10:14,187 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159. 2024-11-23T17:10:14,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor224.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:10:14,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:14,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:14,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-23T17:10:14,188 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:10:14,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T17:10:14,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=200, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-23T17:10:14,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 200 2024-11-23T17:10:14,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-11-23T17:10:14,190 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:10:14,191 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:10:14,193 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:10:14,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742274_1450 (size=185) 2024-11-23T17:10:14,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742274_1450 (size=185) 2024-11-23T17:10:14,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742274_1450 (size=185) 2024-11-23T17:10:14,205 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:10:14,205 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=201, ppid=200, 
state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4310449f69a358a61c60755e5836a16b}, {pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6239371d130b8bd7fbaeb3d576406c93}] 2024-11-23T17:10:14,206 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6239371d130b8bd7fbaeb3d576406c93 2024-11-23T17:10:14,206 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4310449f69a358a61c60755e5836a16b 2024-11-23T17:10:14,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-11-23T17:10:14,340 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-11-23T17:10:14,340 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-11-23T17:10:14,341 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-11-23T17:10:14,358 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37875 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=202 2024-11-23T17:10:14,358 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44711 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=201 2024-11-23T17:10:14,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1732381813528.4310449f69a358a61c60755e5836a16b. 2024-11-23T17:10:14,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1732381813528.6239371d130b8bd7fbaeb3d576406c93. 2024-11-23T17:10:14,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.HRegion(2603): Flush status journal for 4310449f69a358a61c60755e5836a16b: 2024-11-23T17:10:14,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(2603): Flush status journal for 6239371d130b8bd7fbaeb3d576406c93: 2024-11-23T17:10:14,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1732381813528.4310449f69a358a61c60755e5836a16b. for emptySnaptb0-testEmptyExportFileSystemState completed. 
2024-11-23T17:10:14,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1732381813528.6239371d130b8bd7fbaeb3d576406c93. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-11-23T17:10:14,359 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1732381813528.6239371d130b8bd7fbaeb3d576406c93.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-11-23T17:10:14,359 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1732381813528.4310449f69a358a61c60755e5836a16b.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-11-23T17:10:14,359 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:10:14,359 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:10:14,359 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:10:14,359 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:10:14,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742276_1452 (size=76) 2024-11-23T17:10:14,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742275_1451 (size=76) 2024-11-23T17:10:14,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742275_1451 (size=76) 2024-11-23T17:10:14,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742276_1452 (size=76) 2024-11-23T17:10:14,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742275_1451 (size=76) 2024-11-23T17:10:14,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742276_1452 (size=76) 2024-11-23T17:10:14,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1732381813528.6239371d130b8bd7fbaeb3d576406c93. 
2024-11-23T17:10:14,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=202 2024-11-23T17:10:14,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1732381813528.4310449f69a358a61c60755e5836a16b. 2024-11-23T17:10:14,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=201 2024-11-23T17:10:14,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=202 2024-11-23T17:10:14,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=201 2024-11-23T17:10:14,371 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 4310449f69a358a61c60755e5836a16b 2024-11-23T17:10:14,371 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 6239371d130b8bd7fbaeb3d576406c93 2024-11-23T17:10:14,372 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6239371d130b8bd7fbaeb3d576406c93 2024-11-23T17:10:14,372 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4310449f69a358a61c60755e5836a16b 2024-11-23T17:10:14,374 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=202, ppid=200, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6239371d130b8bd7fbaeb3d576406c93 in 167 msec 2024-11-23T17:10:14,375 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=201, resume processing ppid=200 2024-11-23T17:10:14,375 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=201, ppid=200, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 4310449f69a358a61c60755e5836a16b in 167 msec 2024-11-23T17:10:14,375 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:10:14,375 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:10:14,376 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-23T17:10:14,376 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-23T17:10:14,376 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:10:14,377 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-23T17:10:14,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742277_1453 (size=68) 2024-11-23T17:10:14,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742277_1453 (size=68) 2024-11-23T17:10:14,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742277_1453 (size=68) 2024-11-23T17:10:14,387 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:10:14,387 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState 2024-11-23T17:10:14,388 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-23T17:10:14,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742278_1454 (size=673) 2024-11-23T17:10:14,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742278_1454 (size=673) 2024-11-23T17:10:14,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742278_1454 (size=673) 2024-11-23T17:10:14,400 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:10:14,404 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:10:14,404 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-23T17:10:14,405 INFO [PEWorker-3 {}] 
procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:10:14,405 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 200 2024-11-23T17:10:14,406 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=200, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 217 msec 2024-11-23T17:10:14,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-11-23T17:10:14,507 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-23T17:10:14,512 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44711 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,,1732381813528.4310449f69a358a61c60755e5836a16b. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:10:14,514 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37875 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,1,1732381813528.6239371d130b8bd7fbaeb3d576406c93. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:10:14,515 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T17:10:14,517 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-11-23T17:10:14,517 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1732381813528.4310449f69a358a61c60755e5836a16b. 
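The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash" warnings just above correspond to puts issued with write-ahead logging turned off. A minimal sketch of how a client produces such writes; the row key and value here are illustrative placeholders, only the table, family, and qualifier names come from the log:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(
             TableName.valueOf("testtb-testEmptyExportFileSystemState"))) {
      Put put = new Put(Bytes.toBytes("row-0"));              // illustrative row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"),  // cf:q as seen in the log
          Bytes.toBytes("value"));                            // illustrative value
      put.setDurability(Durability.SKIP_WAL);                 // triggers the warning above
      table.put(put);
    }
  }
}
```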
2024-11-23T17:10:14,517 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:10:14,519 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T17:10:14,523 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T17:10:14,527 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T17:10:14,529 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-23T17:10:14,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732381814529 (current time:1732381814529). 2024-11-23T17:10:14,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:10:14,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-11-23T17:10:14,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:10:14,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f05615c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:14,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:10:14,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:10:14,531 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:10:14,531 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:10:14,531 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:10:14,531 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1baac736, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:14,531 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:10:14,531 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:10:14,532 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:14,532 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56588, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:10:14,533 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5043c8a8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:14,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:10:14,534 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:10:14,534 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:10:14,534 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43398, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:10:14,535 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159. 
2024-11-23T17:10:14,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:10:14,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:14,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:14,536 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:10:14,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29160a97, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:14,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:10:14,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:10:14,537 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:10:14,537 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:10:14,537 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:10:14,537 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@95c1cc1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:14,537 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:10:14,537 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:10:14,538 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:14,538 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56616, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:10:14,539 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44dceeb3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:14,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:10:14,539 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:10:14,540 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:10:14,540 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43414, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:10:14,542 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce., hostname=1a7c5470b472,37875,1732381534675, seqNum=2] 2024-11-23T17:10:14,543 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159. 
2024-11-23T17:10:14,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor224.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:10:14,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:14,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:14,543 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:10:14,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-23T17:10:14,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
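"No existing snapshot, attempting snapshot..." above is the master verifying that no snapshot with the requested name is already registered or completed. A client can make a similar check before submitting the request by listing completed snapshots by name; a minimal sketch, with the pattern being simply the snapshot name from the log:

```java
import java.util.regex.Pattern;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotExistsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // listSnapshots(Pattern) returns completed snapshots whose names match.
      boolean exists = !admin.listSnapshots(
          Pattern.compile("snaptb0-testEmptyExportFileSystemState")).isEmpty();
      System.out.println("snapshot already present: " + exists);
    }
  }
}
```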
2024-11-23T17:10:14,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-23T17:10:14,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-11-23T17:10:14,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-11-23T17:10:14,545 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:10:14,546 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:10:14,548 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:10:14,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742279_1455 (size=180) 2024-11-23T17:10:14,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742279_1455 (size=180) 2024-11-23T17:10:14,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742279_1455 (size=180) 2024-11-23T17:10:14,555 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:10:14,555 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4310449f69a358a61c60755e5836a16b}, {pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6239371d130b8bd7fbaeb3d576406c93}] 2024-11-23T17:10:14,556 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6239371d130b8bd7fbaeb3d576406c93 2024-11-23T17:10:14,556 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
4310449f69a358a61c60755e5836a16b 2024-11-23T17:10:14,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-11-23T17:10:14,708 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37875 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=205 2024-11-23T17:10:14,708 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44711 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=204 2024-11-23T17:10:14,708 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1732381813528.4310449f69a358a61c60755e5836a16b. 2024-11-23T17:10:14,708 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1732381813528.6239371d130b8bd7fbaeb3d576406c93. 2024-11-23T17:10:14,708 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2902): Flushing 4310449f69a358a61c60755e5836a16b 1/1 column families, dataSize=333 B heapSize=976 B 2024-11-23T17:10:14,708 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2902): Flushing 6239371d130b8bd7fbaeb3d576406c93 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-11-23T17:10:14,733 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123268ba1907e5a44b79704fa15c80217b8_4310449f69a358a61c60755e5836a16b is 71, key is 07554c56d7159e8a6e5903a553cf4a66/cf:q/1732381814512/Put/seqid=0 2024-11-23T17:10:14,733 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241123e54e0211e6224391a271a7fa058c4876_6239371d130b8bd7fbaeb3d576406c93 is 71, key is 13c8f8aa7f727a992eabb43a4ec0c038/cf:q/1732381814514/Put/seqid=0 2024-11-23T17:10:14,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742280_1456 (size=5242) 2024-11-23T17:10:14,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742280_1456 (size=5242) 2024-11-23T17:10:14,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742280_1456 (size=5242) 2024-11-23T17:10:14,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:10:14,739 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742281_1457 (size=8031) 2024-11-23T17:10:14,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742281_1457 (size=8031) 2024-11-23T17:10:14,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742281_1457 (size=8031) 2024-11-23T17:10:14,740 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:10:14,743 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123268ba1907e5a44b79704fa15c80217b8_4310449f69a358a61c60755e5836a16b to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241123268ba1907e5a44b79704fa15c80217b8_4310449f69a358a61c60755e5836a16b 2024-11-23T17:10:14,744 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/4310449f69a358a61c60755e5836a16b/.tmp/cf/7e5846a047c34c5b8ffdeb3fdbca4587, store: [table=testtb-testEmptyExportFileSystemState family=cf region=4310449f69a358a61c60755e5836a16b] 2024-11-23T17:10:14,744 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241123e54e0211e6224391a271a7fa058c4876_6239371d130b8bd7fbaeb3d576406c93 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241123e54e0211e6224391a271a7fa058c4876_6239371d130b8bd7fbaeb3d576406c93 2024-11-23T17:10:14,745 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/4310449f69a358a61c60755e5836a16b/.tmp/cf/7e5846a047c34c5b8ffdeb3fdbca4587 is 214, key is 0491210c341bed7d2f76d70b234142571/cf:q/1732381814512/Put/seqid=0 2024-11-23T17:10:14,745 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/6239371d130b8bd7fbaeb3d576406c93/.tmp/cf/8f5e8819596b47f6ba5169532c552d5a, store: [table=testtb-testEmptyExportFileSystemState family=cf 
region=6239371d130b8bd7fbaeb3d576406c93] 2024-11-23T17:10:14,745 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/6239371d130b8bd7fbaeb3d576406c93/.tmp/cf/8f5e8819596b47f6ba5169532c552d5a is 214, key is 16057255da7cbe96debc4560bdd4d30c2/cf:q/1732381814514/Put/seqid=0 2024-11-23T17:10:14,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742282_1458 (size=6358) 2024-11-23T17:10:14,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742282_1458 (size=6358) 2024-11-23T17:10:14,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742282_1458 (size=6358) 2024-11-23T17:10:14,751 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=333, hasBloomFilter=true, into tmp file hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/4310449f69a358a61c60755e5836a16b/.tmp/cf/7e5846a047c34c5b8ffdeb3fdbca4587 2024-11-23T17:10:14,755 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/4310449f69a358a61c60755e5836a16b/.tmp/cf/7e5846a047c34c5b8ffdeb3fdbca4587 as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/4310449f69a358a61c60755e5836a16b/cf/7e5846a047c34c5b8ffdeb3fdbca4587 2024-11-23T17:10:14,760 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/4310449f69a358a61c60755e5836a16b/cf/7e5846a047c34c5b8ffdeb3fdbca4587, entries=5, sequenceid=6, filesize=6.2 K 2024-11-23T17:10:14,761 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for 4310449f69a358a61c60755e5836a16b in 53ms, sequenceid=6, compaction requested=false 2024-11-23T17:10:14,761 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2603): Flush status journal for 4310449f69a358a61c60755e5836a16b: 2024-11-23T17:10:14,761 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1732381813528.4310449f69a358a61c60755e5836a16b. for snaptb0-testEmptyExportFileSystemState completed. 
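[Editor's note] The trace above shows the master's SnapshotProcedure (pid=203) fanning out one SnapshotRegionProcedure per region, each of which flushes the region (including its MOB store) before recording the flushed hfile in the snapshot manifest. As a hedged sketch only — not the test's own code — this is roughly how a client would request such a FLUSH-type snapshot through the public Admin API; the connection handling is illustrative, the table and snapshot names are taken from the log.

```java
// Sketch: request a FLUSH snapshot via the Admin API, which drives the
// SnapshotProcedure / SnapshotRegionProcedure sequence traced above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the master reports the snapshot procedure as finished.
      admin.snapshot("snaptb0-testEmptyExportFileSystemState",
          TableName.valueOf("testtb-testEmptyExportFileSystemState"),
          SnapshotType.FLUSH);
    }
  }
}
```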
2024-11-23T17:10:14,761 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1732381813528.4310449f69a358a61c60755e5836a16b.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-23T17:10:14,761 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:10:14,761 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/4310449f69a358a61c60755e5836a16b/cf/7e5846a047c34c5b8ffdeb3fdbca4587] hfiles 2024-11-23T17:10:14,761 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/4310449f69a358a61c60755e5836a16b/cf/7e5846a047c34c5b8ffdeb3fdbca4587 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-23T17:10:14,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742283_1459 (size=14817) 2024-11-23T17:10:14,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742283_1459 (size=14817) 2024-11-23T17:10:14,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742283_1459 (size=14817) 2024-11-23T17:10:14,763 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/6239371d130b8bd7fbaeb3d576406c93/.tmp/cf/8f5e8819596b47f6ba5169532c552d5a 2024-11-23T17:10:14,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742284_1460 (size=115) 2024-11-23T17:10:14,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742284_1460 (size=115) 2024-11-23T17:10:14,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742284_1460 (size=115) 2024-11-23T17:10:14,768 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1732381813528.4310449f69a358a61c60755e5836a16b. 
2024-11-23T17:10:14,768 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=204 2024-11-23T17:10:14,769 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/6239371d130b8bd7fbaeb3d576406c93/.tmp/cf/8f5e8819596b47f6ba5169532c552d5a as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/6239371d130b8bd7fbaeb3d576406c93/cf/8f5e8819596b47f6ba5169532c552d5a 2024-11-23T17:10:14,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=204 2024-11-23T17:10:14,769 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 4310449f69a358a61c60755e5836a16b 2024-11-23T17:10:14,769 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4310449f69a358a61c60755e5836a16b 2024-11-23T17:10:14,771 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=204, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 4310449f69a358a61c60755e5836a16b in 215 msec 2024-11-23T17:10:14,773 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/6239371d130b8bd7fbaeb3d576406c93/cf/8f5e8819596b47f6ba5169532c552d5a, entries=45, sequenceid=6, filesize=14.5 K 2024-11-23T17:10:14,774 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 6239371d130b8bd7fbaeb3d576406c93 in 66ms, sequenceid=6, compaction requested=false 2024-11-23T17:10:14,774 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2603): Flush status journal for 6239371d130b8bd7fbaeb3d576406c93: 2024-11-23T17:10:14,774 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1732381813528.6239371d130b8bd7fbaeb3d576406c93. for snaptb0-testEmptyExportFileSystemState completed. 2024-11-23T17:10:14,774 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1732381813528.6239371d130b8bd7fbaeb3d576406c93.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-23T17:10:14,774 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:10:14,774 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/6239371d130b8bd7fbaeb3d576406c93/cf/8f5e8819596b47f6ba5169532c552d5a] hfiles 2024-11-23T17:10:14,774 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/6239371d130b8bd7fbaeb3d576406c93/cf/8f5e8819596b47f6ba5169532c552d5a for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-23T17:10:14,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742285_1461 (size=115) 2024-11-23T17:10:14,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742285_1461 (size=115) 2024-11-23T17:10:14,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742285_1461 (size=115) 2024-11-23T17:10:14,781 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1732381813528.6239371d130b8bd7fbaeb3d576406c93. 
2024-11-23T17:10:14,781 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=205 2024-11-23T17:10:14,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=205 2024-11-23T17:10:14,782 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 6239371d130b8bd7fbaeb3d576406c93 2024-11-23T17:10:14,782 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6239371d130b8bd7fbaeb3d576406c93 2024-11-23T17:10:14,785 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=205, resume processing ppid=203 2024-11-23T17:10:14,785 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=205, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6239371d130b8bd7fbaeb3d576406c93 in 228 msec 2024-11-23T17:10:14,785 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:10:14,785 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:10:14,786 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
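[Editor's note] The SNAPSHOT_SNAPSHOT_MOB_REGION step above exists because the test table's `cf` family is MOB-enabled (the flushed cells land under `/mobdir` and the snapshot carries a separate MOB region, 37e3865d...). A minimal sketch of how such a family could be declared; the 100 KB threshold is an assumed example value, not taken from this log.

```java
// Sketch: declaring a MOB-enabled column family like the test's 'cf'.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobTableDescriptor {
  static TableDescriptor build() {
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testtb-testEmptyExportFileSystemState"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)          // large cells are written as MOB files under /mobdir
            .setMobThreshold(100 * 1024)  // example threshold; not the test's actual setting
            .build())
        .build();
  }
}
```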
2024-11-23T17:10:14,786 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-23T17:10:14,786 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:10:14,788 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241123e54e0211e6224391a271a7fa058c4876_6239371d130b8bd7fbaeb3d576406c93, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241123268ba1907e5a44b79704fa15c80217b8_4310449f69a358a61c60755e5836a16b] hfiles 2024-11-23T17:10:14,788 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241123e54e0211e6224391a271a7fa058c4876_6239371d130b8bd7fbaeb3d576406c93 2024-11-23T17:10:14,788 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241123268ba1907e5a44b79704fa15c80217b8_4310449f69a358a61c60755e5836a16b 2024-11-23T17:10:14,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742286_1462 (size=299) 2024-11-23T17:10:14,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742286_1462 (size=299) 2024-11-23T17:10:14,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742286_1462 (size=299) 2024-11-23T17:10:14,795 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:10:14,795 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState 2024-11-23T17:10:14,796 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState 2024-11-23T17:10:14,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742287_1463 (size=983) 2024-11-23T17:10:14,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742287_1463 (size=983) 
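[Editor's note] After the per-region and MOB manifests are consolidated (SNAPSHOT_CONSOLIDATE_SNAPSHOT above), the master verifies the snapshot and moves it out of `.hbase-snapshot/.tmp`. A hedged sketch of confirming from a client that the finished snapshot is visible; only standard Admin calls are used, and the pattern string is illustrative.

```java
// Sketch: list completed snapshots matching a name pattern once the procedure finishes.
import java.util.regex.Pattern;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public final class ListSnapshots {
  static void printMatching(Admin admin) throws Exception {
    for (SnapshotDescription sd : admin.listSnapshots(Pattern.compile("snaptb0-.*"))) {
      System.out.println(sd.getName() + " on table " + sd.getTableName());
    }
  }
}
```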
2024-11-23T17:10:14,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742287_1463 (size=983) 2024-11-23T17:10:14,809 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:10:14,814 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:10:14,814 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState 2024-11-23T17:10:14,816 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:10:14,816 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-11-23T17:10:14,817 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=203, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 272 msec 2024-11-23T17:10:14,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-11-23T17:10:14,858 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-23T17:10:14,858 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381814858 2024-11-23T17:10:14,859 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:35431, tgtDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381814858, rawTgtDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381814858, srcFsUri=hdfs://localhost:35431, srcDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:10:14,890 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:35431, 
inputRoot=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:10:14,890 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1293158403_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381814858, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381814858/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-23T17:10:14,892 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-23T17:10:14,896 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381814858/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-23T17:10:14,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742288_1464 (size=185) 2024-11-23T17:10:14,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742288_1464 (size=185) 2024-11-23T17:10:14,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742288_1464 (size=185) 2024-11-23T17:10:14,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742289_1465 (size=673) 2024-11-23T17:10:14,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742289_1465 (size=673) 2024-11-23T17:10:14,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742289_1465 (size=673) 2024-11-23T17:10:14,912 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:10:14,912 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:10:14,912 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:10:15,943 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop-3936264036037755862.jar 2024-11-23T17:10:15,944 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:10:15,944 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:10:16,013 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop-12619055698549507060.jar 2024-11-23T17:10:16,013 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:10:16,013 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:10:16,013 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:10:16,014 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:10:16,014 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:10:16,014 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:10:16,014 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-23T17:10:16,014 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-23T17:10:16,014 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-23T17:10:16,015 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-23T17:10:16,015 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-23T17:10:16,015 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-23T17:10:16,015 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-23T17:10:16,015 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-23T17:10:16,016 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-23T17:10:16,016 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-23T17:10:16,016 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-23T17:10:16,016 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:10:16,017 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:10:16,017 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:10:16,017 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:10:16,017 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:10:16,017 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:10:16,017 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:10:16,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742290_1466 (size=24020) 2024-11-23T17:10:16,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742290_1466 (size=24020) 2024-11-23T17:10:16,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742290_1466 (size=24020) 2024-11-23T17:10:16,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742291_1467 (size=77755) 2024-11-23T17:10:16,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742291_1467 (size=77755) 2024-11-23T17:10:16,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742291_1467 (size=77755) 2024-11-23T17:10:16,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742292_1468 (size=131360) 2024-11-23T17:10:16,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742292_1468 (size=131360) 2024-11-23T17:10:16,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742292_1468 (size=131360) 2024-11-23T17:10:16,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742293_1469 (size=111793) 2024-11-23T17:10:16,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742293_1469 (size=111793) 2024-11-23T17:10:16,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742293_1469 (size=111793) 2024-11-23T17:10:16,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742294_1470 (size=1832290) 2024-11-23T17:10:16,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to 
blk_1073742294_1470 (size=1832290) 2024-11-23T17:10:16,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742294_1470 (size=1832290) 2024-11-23T17:10:16,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742295_1471 (size=8360005) 2024-11-23T17:10:16,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742295_1471 (size=8360005) 2024-11-23T17:10:16,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742295_1471 (size=8360005) 2024-11-23T17:10:16,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742296_1472 (size=503880) 2024-11-23T17:10:16,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742296_1472 (size=503880) 2024-11-23T17:10:16,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742296_1472 (size=503880) 2024-11-23T17:10:16,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742297_1473 (size=322274) 2024-11-23T17:10:16,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742297_1473 (size=322274) 2024-11-23T17:10:16,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742297_1473 (size=322274) 2024-11-23T17:10:16,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742298_1474 (size=20406) 2024-11-23T17:10:16,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742298_1474 (size=20406) 2024-11-23T17:10:16,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742298_1474 (size=20406) 2024-11-23T17:10:16,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742299_1475 (size=45609) 2024-11-23T17:10:16,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742299_1475 (size=45609) 2024-11-23T17:10:16,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742299_1475 (size=45609) 2024-11-23T17:10:16,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742300_1476 (size=136454) 2024-11-23T17:10:16,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742300_1476 (size=136454) 2024-11-23T17:10:16,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742300_1476 (size=136454) 2024-11-23T17:10:16,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is 
added to blk_1073742301_1477 (size=1597270) 2024-11-23T17:10:16,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742301_1477 (size=1597270) 2024-11-23T17:10:16,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742301_1477 (size=1597270) 2024-11-23T17:10:16,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742302_1478 (size=6424742) 2024-11-23T17:10:16,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742302_1478 (size=6424742) 2024-11-23T17:10:16,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742302_1478 (size=6424742) 2024-11-23T17:10:16,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742303_1479 (size=30873) 2024-11-23T17:10:16,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742303_1479 (size=30873) 2024-11-23T17:10:16,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742303_1479 (size=30873) 2024-11-23T17:10:16,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742304_1480 (size=29229) 2024-11-23T17:10:16,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742304_1480 (size=29229) 2024-11-23T17:10:16,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742304_1480 (size=29229) 2024-11-23T17:10:16,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742305_1481 (size=903661) 2024-11-23T17:10:16,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742305_1481 (size=903661) 2024-11-23T17:10:16,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742305_1481 (size=903661) 2024-11-23T17:10:16,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742306_1482 (size=5175431) 2024-11-23T17:10:16,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742306_1482 (size=5175431) 2024-11-23T17:10:16,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742306_1482 (size=5175431) 2024-11-23T17:10:16,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742307_1483 (size=232881) 2024-11-23T17:10:16,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742307_1483 (size=232881) 2024-11-23T17:10:16,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:44705 is added to blk_1073742307_1483 (size=232881) 2024-11-23T17:10:16,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742308_1484 (size=1323991) 2024-11-23T17:10:16,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742308_1484 (size=1323991) 2024-11-23T17:10:16,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742308_1484 (size=1323991) 2024-11-23T17:10:16,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742309_1485 (size=4695811) 2024-11-23T17:10:16,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742309_1485 (size=4695811) 2024-11-23T17:10:16,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742309_1485 (size=4695811) 2024-11-23T17:10:16,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742310_1486 (size=1877034) 2024-11-23T17:10:16,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742310_1486 (size=1877034) 2024-11-23T17:10:16,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742310_1486 (size=1877034) 2024-11-23T17:10:16,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742311_1487 (size=217555) 2024-11-23T17:10:16,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742311_1487 (size=217555) 2024-11-23T17:10:16,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742311_1487 (size=217555) 2024-11-23T17:10:16,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742312_1488 (size=4188619) 2024-11-23T17:10:16,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742312_1488 (size=4188619) 2024-11-23T17:10:16,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742312_1488 (size=4188619) 2024-11-23T17:10:16,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742313_1489 (size=127628) 2024-11-23T17:10:16,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742313_1489 (size=127628) 2024-11-23T17:10:16,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742313_1489 (size=127628) 2024-11-23T17:10:16,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742314_1490 (size=440956) 2024-11-23T17:10:16,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742314_1490 (size=440956) 2024-11-23T17:10:16,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742314_1490 (size=440956) 2024-11-23T17:10:16,745 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-23T17:10:16,747 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list 2024-11-23T17:10:16,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742315_1491 (size=7) 2024-11-23T17:10:16,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742315_1491 (size=7) 2024-11-23T17:10:16,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742315_1491 (size=7) 2024-11-23T17:10:16,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742316_1492 (size=10) 2024-11-23T17:10:16,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742316_1492 (size=10) 2024-11-23T17:10:16,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742316_1492 (size=10) 2024-11-23T17:10:16,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742317_1493 (size=303982) 2024-11-23T17:10:16,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742317_1493 (size=303982) 2024-11-23T17:10:16,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742317_1493 (size=303982) 2024-11-23T17:10:16,787 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T17:10:16,787 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-23T17:10:16,878 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0008_000001 (auth:SIMPLE) from 127.0.0.1:41408 2024-11-23T17:10:18,732 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:10:22,237 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0008_000001 (auth:SIMPLE) from 127.0.0.1:32796 2024-11-23T17:10:22,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742318_1494 (size=349656) 2024-11-23T17:10:22,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742318_1494 (size=349656) 2024-11-23T17:10:22,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742318_1494 (size=349656) 2024-11-23T17:10:23,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742319_1495 (size=8568) 2024-11-23T17:10:23,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742319_1495 (size=8568) 2024-11-23T17:10:23,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742319_1495 (size=8568) 2024-11-23T17:10:23,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742320_1496 (size=460) 2024-11-23T17:10:23,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742320_1496 (size=460) 2024-11-23T17:10:23,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742320_1496 (size=460) 2024-11-23T17:10:23,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742321_1497 (size=8568) 2024-11-23T17:10:23,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742321_1497 (size=8568) 2024-11-23T17:10:23,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742321_1497 (size=8568) 2024-11-23T17:10:23,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742322_1498 (size=349656) 2024-11-23T17:10:23,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742322_1498 (size=349656) 2024-11-23T17:10:23,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742322_1498 (size=349656) 2024-11-23T17:10:24,897 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-23T17:10:24,898 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
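[Editor's note] The stretch above is the ExportSnapshot tool at work: it copies the snapshot manifest to the target, stages the dependency jars via TableMapReduceUtil, runs a MapReduce job, then finalizes and verifies the export. As an illustrative sketch (the CLI equivalent is `hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot ... -copy-to ...`), the same export can be driven programmatically; the destination URI below is a placeholder, not the test's generated path.

```java
// Sketch: run ExportSnapshot as a Hadoop Tool for the empty snapshot exported above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class RunExportSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
        "-copy-to", "hdfs://namenode:8020/backups/export-test"  // placeholder target
    });
    System.exit(rc);
  }
}
```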
2024-11-23T17:10:24,902 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-11-23T17:10:24,902 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-23T17:10:24,902 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-23T17:10:24,902 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1293158403_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-23T17:10:24,903 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-11-23T17:10:24,903 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-11-23T17:10:24,903 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1293158403_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381814858/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381814858/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-23T17:10:24,903 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381814858/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-11-23T17:10:24,903 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381814858/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-11-23T17:10:24,907 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testEmptyExportFileSystemState 2024-11-23T17:10:24,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=206, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-23T17:10:24,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-11-23T17:10:24,910 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381824910"}]},"ts":"1732381824910"} 2024-11-23T17:10:24,911 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-11-23T17:10:24,911 INFO [PEWorker-2 {}] 
procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-11-23T17:10:24,912 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=207, ppid=206, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-11-23T17:10:24,913 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=4310449f69a358a61c60755e5836a16b, UNASSIGN}, {pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=6239371d130b8bd7fbaeb3d576406c93, UNASSIGN}] 2024-11-23T17:10:24,914 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=6239371d130b8bd7fbaeb3d576406c93, UNASSIGN 2024-11-23T17:10:24,914 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=4310449f69a358a61c60755e5836a16b, UNASSIGN 2024-11-23T17:10:24,915 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=6239371d130b8bd7fbaeb3d576406c93, regionState=CLOSING, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:10:24,915 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=208 updating hbase:meta row=4310449f69a358a61c60755e5836a16b, regionState=CLOSING, regionLocation=1a7c5470b472,44711,1732381534872 2024-11-23T17:10:24,916 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=6239371d130b8bd7fbaeb3d576406c93, UNASSIGN because future has completed 2024-11-23T17:10:24,916 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:10:24,916 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=210, ppid=209, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6239371d130b8bd7fbaeb3d576406c93, server=1a7c5470b472,37875,1732381534675}] 2024-11-23T17:10:24,917 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=4310449f69a358a61c60755e5836a16b, UNASSIGN because future has completed 2024-11-23T17:10:24,917 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:10:24,917 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=211, ppid=208, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4310449f69a358a61c60755e5836a16b, server=1a7c5470b472,44711,1732381534872}] 2024-11-23T17:10:25,017 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-11-23T17:10:25,069 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(122): Close 6239371d130b8bd7fbaeb3d576406c93 2024-11-23T17:10:25,069 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(122): Close 4310449f69a358a61c60755e5836a16b 2024-11-23T17:10:25,069 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:10:25,069 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:10:25,069 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1722): Closing 6239371d130b8bd7fbaeb3d576406c93, disabling compactions & flushes 2024-11-23T17:10:25,069 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1732381813528.6239371d130b8bd7fbaeb3d576406c93. 2024-11-23T17:10:25,069 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1732381813528.6239371d130b8bd7fbaeb3d576406c93. 2024-11-23T17:10:25,069 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1722): Closing 4310449f69a358a61c60755e5836a16b, disabling compactions & flushes 2024-11-23T17:10:25,069 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1732381813528.6239371d130b8bd7fbaeb3d576406c93. after waiting 0 ms 2024-11-23T17:10:25,069 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1732381813528.4310449f69a358a61c60755e5836a16b. 2024-11-23T17:10:25,069 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1732381813528.6239371d130b8bd7fbaeb3d576406c93. 2024-11-23T17:10:25,069 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1732381813528.4310449f69a358a61c60755e5836a16b. 2024-11-23T17:10:25,069 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1732381813528.4310449f69a358a61c60755e5836a16b. after waiting 0 ms 2024-11-23T17:10:25,069 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1732381813528.4310449f69a358a61c60755e5836a16b. 
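The disable sequence above (DisableTableProcedure pid=206, its CloseTableRegionsProcedure child, the two UNASSIGN transitions, and the region closes on the region servers) is all fanned out by the master from one client call. A minimal sketch, assuming a standard HBase 2.x client; the class name is illustrative and this is not the test's actual code:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class DisableTableSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Admin admin = conn.getAdmin()) {
        TableName tn = TableName.valueOf("testtb-testEmptyExportFileSystemState");
        // One synchronous client call; the master expands it into the procedure tree seen above
        // (close regions, UNASSIGN transitions, hbase:meta state updates).
        admin.disableTable(tn);
        System.out.println("disabled: " + admin.isTableDisabled(tn));
      }
    }
  }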
2024-11-23T17:10:25,073 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/6239371d130b8bd7fbaeb3d576406c93/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:10:25,073 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/4310449f69a358a61c60755e5836a16b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:10:25,074 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:10:25,074 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:10:25,074 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1732381813528.6239371d130b8bd7fbaeb3d576406c93. 2024-11-23T17:10:25,074 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1732381813528.4310449f69a358a61c60755e5836a16b. 2024-11-23T17:10:25,074 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1676): Region close journal for 4310449f69a358a61c60755e5836a16b: Waiting for close lock at 1732381825069Running coprocessor pre-close hooks at 1732381825069Disabling compacts and flushes for region at 1732381825069Disabling writes for close at 1732381825069Writing region close event to WAL at 1732381825070 (+1 ms)Running coprocessor post-close hooks at 1732381825074 (+4 ms)Closed at 1732381825074 2024-11-23T17:10:25,074 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1676): Region close journal for 6239371d130b8bd7fbaeb3d576406c93: Waiting for close lock at 1732381825069Running coprocessor pre-close hooks at 1732381825069Disabling compacts and flushes for region at 1732381825069Disabling writes for close at 1732381825069Writing region close event to WAL at 1732381825070 (+1 ms)Running coprocessor post-close hooks at 1732381825073 (+3 ms)Closed at 1732381825074 (+1 ms) 2024-11-23T17:10:25,075 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(157): Closed 6239371d130b8bd7fbaeb3d576406c93 2024-11-23T17:10:25,076 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=6239371d130b8bd7fbaeb3d576406c93, regionState=CLOSED 2024-11-23T17:10:25,076 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(157): Closed 4310449f69a358a61c60755e5836a16b 2024-11-23T17:10:25,077 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=208 updating hbase:meta row=4310449f69a358a61c60755e5836a16b, regionState=CLOSED 2024-11-23T17:10:25,078 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 
{}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=210, ppid=209, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6239371d130b8bd7fbaeb3d576406c93, server=1a7c5470b472,37875,1732381534675 because future has completed 2024-11-23T17:10:25,078 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=211, ppid=208, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4310449f69a358a61c60755e5836a16b, server=1a7c5470b472,44711,1732381534872 because future has completed 2024-11-23T17:10:25,080 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=210, resume processing ppid=209 2024-11-23T17:10:25,080 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=210, ppid=209, state=SUCCESS, hasLock=false; CloseRegionProcedure 6239371d130b8bd7fbaeb3d576406c93, server=1a7c5470b472,37875,1732381534675 in 162 msec 2024-11-23T17:10:25,085 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=211, resume processing ppid=208 2024-11-23T17:10:25,085 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=209, ppid=207, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=6239371d130b8bd7fbaeb3d576406c93, UNASSIGN in 167 msec 2024-11-23T17:10:25,085 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=211, ppid=208, state=SUCCESS, hasLock=false; CloseRegionProcedure 4310449f69a358a61c60755e5836a16b, server=1a7c5470b472,44711,1732381534872 in 163 msec 2024-11-23T17:10:25,086 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=208, resume processing ppid=207 2024-11-23T17:10:25,086 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=208, ppid=207, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=4310449f69a358a61c60755e5836a16b, UNASSIGN in 172 msec 2024-11-23T17:10:25,088 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=207, resume processing ppid=206 2024-11-23T17:10:25,088 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=207, ppid=206, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 175 msec 2024-11-23T17:10:25,090 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381825090"}]},"ts":"1732381825090"} 2024-11-23T17:10:25,091 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-11-23T17:10:25,091 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testEmptyExportFileSystemState to state=DISABLED 2024-11-23T17:10:25,093 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=206, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 185 msec 2024-11-23T17:10:25,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-11-23T17:10:25,228 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-23T17:10:25,229 
INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testEmptyExportFileSystemState 2024-11-23T17:10:25,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=212, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-23T17:10:25,230 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=212, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-23T17:10:25,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testEmptyExportFileSystemState 2024-11-23T17:10:25,231 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=212, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-23T17:10:25,233 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37875 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-11-23T17:10:25,234 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/6239371d130b8bd7fbaeb3d576406c93 2024-11-23T17:10:25,234 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/4310449f69a358a61c60755e5836a16b 2024-11-23T17:10:25,236 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/4310449f69a358a61c60755e5836a16b/cf, FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/4310449f69a358a61c60755e5836a16b/recovered.edits] 2024-11-23T17:10:25,236 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/6239371d130b8bd7fbaeb3d576406c93/cf, FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/6239371d130b8bd7fbaeb3d576406c93/recovered.edits] 2024-11-23T17:10:25,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-23T17:10:25,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-23T17:10:25,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-23T17:10:25,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-23T17:10:25,239 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-23T17:10:25,239 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-23T17:10:25,239 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-23T17:10:25,239 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-23T17:10:25,240 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/6239371d130b8bd7fbaeb3d576406c93/cf/8f5e8819596b47f6ba5169532c552d5a to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testEmptyExportFileSystemState/6239371d130b8bd7fbaeb3d576406c93/cf/8f5e8819596b47f6ba5169532c552d5a 2024-11-23T17:10:25,240 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/4310449f69a358a61c60755e5836a16b/cf/7e5846a047c34c5b8ffdeb3fdbca4587 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testEmptyExportFileSystemState/4310449f69a358a61c60755e5836a16b/cf/7e5846a047c34c5b8ffdeb3fdbca4587 2024-11-23T17:10:25,241 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-23T17:10:25,241 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:10:25,241 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-23T17:10:25,241 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-23T17:10:25,241 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/acl 2024-11-23T17:10:25,241 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:10:25,241 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-23T17:10:25,241 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:10:25,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=212 2024-11-23T17:10:25,242 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:25,243 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:25,243 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:25,243 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:25,244 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/4310449f69a358a61c60755e5836a16b/recovered.edits/9.seqid to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testEmptyExportFileSystemState/4310449f69a358a61c60755e5836a16b/recovered.edits/9.seqid 2024-11-23T17:10:25,244 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/6239371d130b8bd7fbaeb3d576406c93/recovered.edits/9.seqid to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testEmptyExportFileSystemState/6239371d130b8bd7fbaeb3d576406c93/recovered.edits/9.seqid 2024-11-23T17:10:25,244 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/4310449f69a358a61c60755e5836a16b 2024-11-23T17:10:25,244 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testEmptyExportFileSystemState/6239371d130b8bd7fbaeb3d576406c93 2024-11-23T17:10:25,245 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-11-23T17:10:25,245 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0 2024-11-23T17:10:25,246 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf] 2024-11-23T17:10:25,248 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241123e54e0211e6224391a271a7fa058c4876_6239371d130b8bd7fbaeb3d576406c93 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241123e54e0211e6224391a271a7fa058c4876_6239371d130b8bd7fbaeb3d576406c93 2024-11-23T17:10:25,249 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241123268ba1907e5a44b79704fa15c80217b8_4310449f69a358a61c60755e5836a16b to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241123268ba1907e5a44b79704fa15c80217b8_4310449f69a358a61c60755e5836a16b 2024-11-23T17:10:25,250 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0 2024-11-23T17:10:25,252 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=212, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-23T17:10:25,254 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testEmptyExportFileSystemState from hbase:meta 2024-11-23T17:10:25,256 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 2024-11-23T17:10:25,257 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=212, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-23T17:10:25,257 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testEmptyExportFileSystemState' from region states. 
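The table delete above and the snapshot deletions that follow are, from the client's side, just a few Admin calls; the master's DeleteTableProcedure does the archiving of region and MOB directories and the hbase:meta cleanup shown in the surrounding entries. A hedged sketch with an illustrative class name, assuming the table was already disabled as above:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class DropTableAndSnapshotsSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Admin admin = conn.getAdmin()) {
        TableName tn = TableName.valueOf("testtb-testEmptyExportFileSystemState");
        // Deleting the (disabled) table triggers the DeleteTableProcedure seen above:
        // region dirs move to the archive, meta rows and the descriptor are removed.
        admin.deleteTable(tn);
        // The snapshots created earlier in the test are dropped separately.
        admin.deleteSnapshot("emptySnaptb0-testEmptyExportFileSystemState");
        admin.deleteSnapshot("snaptb0-testEmptyExportFileSystemState");
      }
    }
  }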
2024-11-23T17:10:25,257 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1732381813528.4310449f69a358a61c60755e5836a16b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732381825257"}]},"ts":"9223372036854775807"} 2024-11-23T17:10:25,257 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1732381813528.6239371d130b8bd7fbaeb3d576406c93.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732381825257"}]},"ts":"9223372036854775807"} 2024-11-23T17:10:25,259 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-23T17:10:25,259 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 4310449f69a358a61c60755e5836a16b, NAME => 'testtb-testEmptyExportFileSystemState,,1732381813528.4310449f69a358a61c60755e5836a16b.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 6239371d130b8bd7fbaeb3d576406c93, NAME => 'testtb-testEmptyExportFileSystemState,1,1732381813528.6239371d130b8bd7fbaeb3d576406c93.', STARTKEY => '1', ENDKEY => ''}] 2024-11-23T17:10:25,259 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 2024-11-23T17:10:25,260 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732381825260"}]},"ts":"9223372036854775807"} 2024-11-23T17:10:25,261 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testEmptyExportFileSystemState state from META 2024-11-23T17:10:25,262 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=212, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-23T17:10:25,263 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=212, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 33 msec 2024-11-23T17:10:25,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=212 2024-11-23T17:10:25,348 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testEmptyExportFileSystemState 2024-11-23T17:10:25,348 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-23T17:10:25,353 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-11-23T17:10:25,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-11-23T17:10:25,356 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-11-23T17:10:25,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-11-23T17:10:25,381 INFO [Time-limited 
test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testEmptyExportFileSystemState Thread=821 (was 811) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1293158403_22 at /127.0.0.1:52168 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37209 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 21877) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_794433593_1 at /127.0.0.1:57498 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:43583 from appattempt_1732381541272_0008_000001 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ApplicationMasterLauncher #14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1293158403_22 at /127.0.0.1:57540 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-7436 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:37209 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1293158403_22 at /127.0.0.1:51104 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_794433593_1 at /127.0.0.1:52158 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=815 (was 797) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=515 (was 518), ProcessCount=17 (was 14) - ProcessCount LEAK? -, AvailableMemoryMB=2448 (was 2915) 2024-11-23T17:10:25,382 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=821 is superior to 500 2024-11-23T17:10:25,400 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithChecksum Thread=821, OpenFileDescriptor=815, MaxFileDescriptor=1048576, SystemLoadAverage=515, ProcessCount=17, AvailableMemoryMB=2446 2024-11-23T17:10:25,400 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=821 is superior to 500 2024-11-23T17:10:25,401 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T17:10:25,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=213, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum 2024-11-23T17:10:25,403 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T17:10:25,403 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 213 2024-11-23T17:10:25,403 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T17:10:25,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-11-23T17:10:25,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742323_1499 (size=440) 2024-11-23T17:10:25,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742323_1499 (size=440) 2024-11-23T17:10:25,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742323_1499 (size=440) 2024-11-23T17:10:25,411 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => afb0f86b4bfaa9a4dd331ad4691ba018, NAME => 'testtb-testExportWithChecksum,,1732381825401.afb0f86b4bfaa9a4dd331ad4691ba018.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:10:25,412 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => de3ad9c9965ac0ad50fdb7f1172460e5, NAME => 'testtb-testExportWithChecksum,1,1732381825401.de3ad9c9965ac0ad50fdb7f1172460e5.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:10:25,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742324_1500 (size=65) 2024-11-23T17:10:25,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742324_1500 (size=65) 2024-11-23T17:10:25,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742324_1500 (size=65) 2024-11-23T17:10:25,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742325_1501 (size=65) 2024-11-23T17:10:25,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742325_1501 (size=65) 2024-11-23T17:10:25,419 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1732381825401.afb0f86b4bfaa9a4dd331ad4691ba018.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:10:25,419 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1722): Closing afb0f86b4bfaa9a4dd331ad4691ba018, disabling compactions & flushes 2024-11-23T17:10:25,419 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1732381825401.afb0f86b4bfaa9a4dd331ad4691ba018. 
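The create-table entries above (MOB_THRESHOLD => '0', IS_MOB => 'true', VERSIONS => '1', and two regions split at key '1') correspond to a descriptor that client code could build roughly as follows. This is a sketch with an illustrative class name, not the test's actual code:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.TableDescriptor;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
  import org.apache.hadoop.hbase.util.Bytes;

  public class CreateMobTableSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Admin admin = conn.getAdmin()) {
        // 'cf' mirrors the descriptor in the log: MOB enabled with threshold 0, single version.
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)
            .setMobThreshold(0L)
            .setMaxVersions(1)
            .build();
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testExportWithChecksum"))
            .setColumnFamily(cf)
            .build();
        // One split key '1' yields the two regions created above ('' -> '1' and '1' -> '').
        admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
      }
    }
  }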
2024-11-23T17:10:25,419 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1732381825401.afb0f86b4bfaa9a4dd331ad4691ba018. 2024-11-23T17:10:25,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742325_1501 (size=65) 2024-11-23T17:10:25,419 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1732381825401.afb0f86b4bfaa9a4dd331ad4691ba018. after waiting 0 ms 2024-11-23T17:10:25,419 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1732381825401.afb0f86b4bfaa9a4dd331ad4691ba018. 2024-11-23T17:10:25,419 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1732381825401.afb0f86b4bfaa9a4dd331ad4691ba018. 2024-11-23T17:10:25,419 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1676): Region close journal for afb0f86b4bfaa9a4dd331ad4691ba018: Waiting for close lock at 1732381825419Disabling compacts and flushes for region at 1732381825419Disabling writes for close at 1732381825419Writing region close event to WAL at 1732381825419Closed at 1732381825419 2024-11-23T17:10:25,419 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1732381825401.de3ad9c9965ac0ad50fdb7f1172460e5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:10:25,419 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1722): Closing de3ad9c9965ac0ad50fdb7f1172460e5, disabling compactions & flushes 2024-11-23T17:10:25,419 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1732381825401.de3ad9c9965ac0ad50fdb7f1172460e5. 2024-11-23T17:10:25,419 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1732381825401.de3ad9c9965ac0ad50fdb7f1172460e5. 2024-11-23T17:10:25,419 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1732381825401.de3ad9c9965ac0ad50fdb7f1172460e5. after waiting 0 ms 2024-11-23T17:10:25,419 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1732381825401.de3ad9c9965ac0ad50fdb7f1172460e5. 2024-11-23T17:10:25,419 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1732381825401.de3ad9c9965ac0ad50fdb7f1172460e5. 
2024-11-23T17:10:25,419 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1676): Region close journal for de3ad9c9965ac0ad50fdb7f1172460e5: Waiting for close lock at 1732381825419Disabling compacts and flushes for region at 1732381825419Disabling writes for close at 1732381825419Writing region close event to WAL at 1732381825419Closed at 1732381825419 2024-11-23T17:10:25,420 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T17:10:25,421 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1732381825401.afb0f86b4bfaa9a4dd331ad4691ba018.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732381825420"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732381825420"}]},"ts":"1732381825420"} 2024-11-23T17:10:25,421 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1732381825401.de3ad9c9965ac0ad50fdb7f1172460e5.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732381825420"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732381825420"}]},"ts":"1732381825420"} 2024-11-23T17:10:25,423 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-23T17:10:25,424 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T17:10:25,424 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381825424"}]},"ts":"1732381825424"} 2024-11-23T17:10:25,425 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-11-23T17:10:25,426 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {1a7c5470b472=0} racks are {/default-rack=0} 2024-11-23T17:10:25,426 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T17:10:25,426 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T17:10:25,426 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T17:10:25,426 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T17:10:25,426 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T17:10:25,426 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T17:10:25,426 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T17:10:25,426 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T17:10:25,426 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T17:10:25,427 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T17:10:25,427 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=afb0f86b4bfaa9a4dd331ad4691ba018, ASSIGN}, {pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=de3ad9c9965ac0ad50fdb7f1172460e5, ASSIGN}] 2024-11-23T17:10:25,428 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=de3ad9c9965ac0ad50fdb7f1172460e5, ASSIGN 2024-11-23T17:10:25,428 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=afb0f86b4bfaa9a4dd331ad4691ba018, ASSIGN 2024-11-23T17:10:25,428 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=afb0f86b4bfaa9a4dd331ad4691ba018, ASSIGN; state=OFFLINE, location=1a7c5470b472,37875,1732381534675; forceNewPlan=false, retain=false 2024-11-23T17:10:25,428 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=de3ad9c9965ac0ad50fdb7f1172460e5, ASSIGN; state=OFFLINE, location=1a7c5470b472,43453,1732381534825; forceNewPlan=false, retain=false 2024-11-23T17:10:25,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-11-23T17:10:25,579 INFO [1a7c5470b472:46159 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
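After the balancer has placed the two new regions (the "Reassigned 2 regions" line above) and the OpenRegionProcedures run, the resulting placements can be read back through the client with a RegionLocator. A small hedged sketch, again with an illustrative class name rather than the test's code:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.HRegionLocation;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.RegionLocator;

  public class RegionPlacementSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
           RegionLocator locator = conn.getRegionLocator(
               TableName.valueOf("testtb-testExportWithChecksum"))) {
        // Each location pairs a region (start/end key) with the server the balancer picked,
        // matching the OPENING / OpenRegionProcedure entries in the log.
        for (HRegionLocation loc : locator.getAllRegionLocations()) {
          System.out.println(loc.getRegion().getRegionNameAsString() + " -> " + loc.getServerName());
        }
      }
    }
  }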
2024-11-23T17:10:25,579 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=214 updating hbase:meta row=afb0f86b4bfaa9a4dd331ad4691ba018, regionState=OPENING, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:10:25,579 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=215 updating hbase:meta row=de3ad9c9965ac0ad50fdb7f1172460e5, regionState=OPENING, regionLocation=1a7c5470b472,43453,1732381534825 2024-11-23T17:10:25,581 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=de3ad9c9965ac0ad50fdb7f1172460e5, ASSIGN because future has completed 2024-11-23T17:10:25,581 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=216, ppid=215, state=RUNNABLE, hasLock=false; OpenRegionProcedure de3ad9c9965ac0ad50fdb7f1172460e5, server=1a7c5470b472,43453,1732381534825}] 2024-11-23T17:10:25,582 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=afb0f86b4bfaa9a4dd331ad4691ba018, ASSIGN because future has completed 2024-11-23T17:10:25,582 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=217, ppid=214, state=RUNNABLE, hasLock=false; OpenRegionProcedure afb0f86b4bfaa9a4dd331ad4691ba018, server=1a7c5470b472,37875,1732381534675}] 2024-11-23T17:10:25,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-11-23T17:10:25,736 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,,1732381825401.afb0f86b4bfaa9a4dd331ad4691ba018. 2024-11-23T17:10:25,736 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,1,1732381825401.de3ad9c9965ac0ad50fdb7f1172460e5. 2024-11-23T17:10:25,736 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7752): Opening region: {ENCODED => afb0f86b4bfaa9a4dd331ad4691ba018, NAME => 'testtb-testExportWithChecksum,,1732381825401.afb0f86b4bfaa9a4dd331ad4691ba018.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T17:10:25,736 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7752): Opening region: {ENCODED => de3ad9c9965ac0ad50fdb7f1172460e5, NAME => 'testtb-testExportWithChecksum,1,1732381825401.de3ad9c9965ac0ad50fdb7f1172460e5.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T17:10:25,737 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1732381825401.de3ad9c9965ac0ad50fdb7f1172460e5. service=AccessControlService 2024-11-23T17:10:25,737 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,,1732381825401.afb0f86b4bfaa9a4dd331ad4691ba018. 
service=AccessControlService 2024-11-23T17:10:25,737 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:10:25,737 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:10:25,737 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum de3ad9c9965ac0ad50fdb7f1172460e5 2024-11-23T17:10:25,737 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum afb0f86b4bfaa9a4dd331ad4691ba018 2024-11-23T17:10:25,737 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1732381825401.afb0f86b4bfaa9a4dd331ad4691ba018.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:10:25,737 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1732381825401.de3ad9c9965ac0ad50fdb7f1172460e5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:10:25,737 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7794): checking encryption for de3ad9c9965ac0ad50fdb7f1172460e5 2024-11-23T17:10:25,737 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7794): checking encryption for afb0f86b4bfaa9a4dd331ad4691ba018 2024-11-23T17:10:25,737 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7797): checking classloading for de3ad9c9965ac0ad50fdb7f1172460e5 2024-11-23T17:10:25,737 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7797): checking classloading for afb0f86b4bfaa9a4dd331ad4691ba018 2024-11-23T17:10:25,738 INFO [StoreOpener-de3ad9c9965ac0ad50fdb7f1172460e5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region de3ad9c9965ac0ad50fdb7f1172460e5 2024-11-23T17:10:25,738 INFO [StoreOpener-afb0f86b4bfaa9a4dd331ad4691ba018-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region afb0f86b4bfaa9a4dd331ad4691ba018 2024-11-23T17:10:25,739 INFO [StoreOpener-afb0f86b4bfaa9a4dd331ad4691ba018-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region afb0f86b4bfaa9a4dd331ad4691ba018 columnFamilyName cf 2024-11-23T17:10:25,739 INFO [StoreOpener-de3ad9c9965ac0ad50fdb7f1172460e5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region de3ad9c9965ac0ad50fdb7f1172460e5 columnFamilyName cf 2024-11-23T17:10:25,740 DEBUG [StoreOpener-de3ad9c9965ac0ad50fdb7f1172460e5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:10:25,740 DEBUG [StoreOpener-afb0f86b4bfaa9a4dd331ad4691ba018-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:10:25,740 INFO [StoreOpener-de3ad9c9965ac0ad50fdb7f1172460e5-1 {}] regionserver.HStore(327): Store=de3ad9c9965ac0ad50fdb7f1172460e5/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:10:25,740 INFO [StoreOpener-afb0f86b4bfaa9a4dd331ad4691ba018-1 {}] regionserver.HStore(327): Store=afb0f86b4bfaa9a4dd331ad4691ba018/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:10:25,741 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1038): replaying wal for afb0f86b4bfaa9a4dd331ad4691ba018 2024-11-23T17:10:25,741 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1038): replaying wal for de3ad9c9965ac0ad50fdb7f1172460e5 2024-11-23T17:10:25,741 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/afb0f86b4bfaa9a4dd331ad4691ba018 2024-11-23T17:10:25,741 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/de3ad9c9965ac0ad50fdb7f1172460e5 2024-11-23T17:10:25,742 DEBUG 
[RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/afb0f86b4bfaa9a4dd331ad4691ba018 2024-11-23T17:10:25,742 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/de3ad9c9965ac0ad50fdb7f1172460e5 2024-11-23T17:10:25,742 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1048): stopping wal replay for afb0f86b4bfaa9a4dd331ad4691ba018 2024-11-23T17:10:25,742 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1048): stopping wal replay for de3ad9c9965ac0ad50fdb7f1172460e5 2024-11-23T17:10:25,742 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1060): Cleaning up temporary data for afb0f86b4bfaa9a4dd331ad4691ba018 2024-11-23T17:10:25,742 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1060): Cleaning up temporary data for de3ad9c9965ac0ad50fdb7f1172460e5 2024-11-23T17:10:25,743 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1093): writing seq id for afb0f86b4bfaa9a4dd331ad4691ba018 2024-11-23T17:10:25,743 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1093): writing seq id for de3ad9c9965ac0ad50fdb7f1172460e5 2024-11-23T17:10:25,745 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/afb0f86b4bfaa9a4dd331ad4691ba018/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:10:25,745 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/de3ad9c9965ac0ad50fdb7f1172460e5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:10:25,745 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1114): Opened afb0f86b4bfaa9a4dd331ad4691ba018; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62661057, jitterRate=-0.06627748906612396}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:10:25,745 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1122): Running coprocessor post-open hooks for afb0f86b4bfaa9a4dd331ad4691ba018 2024-11-23T17:10:25,745 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1114): Opened de3ad9c9965ac0ad50fdb7f1172460e5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, 
ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75069147, jitterRate=0.1186174601316452}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:10:25,745 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1122): Running coprocessor post-open hooks for de3ad9c9965ac0ad50fdb7f1172460e5 2024-11-23T17:10:25,746 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1006): Region open journal for de3ad9c9965ac0ad50fdb7f1172460e5: Running coprocessor pre-open hook at 1732381825737Writing region info on filesystem at 1732381825737Initializing all the Stores at 1732381825738 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381825738Cleaning up temporary data from old regions at 1732381825742 (+4 ms)Running coprocessor post-open hooks at 1732381825745 (+3 ms)Region opened successfully at 1732381825746 (+1 ms) 2024-11-23T17:10:25,746 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1006): Region open journal for afb0f86b4bfaa9a4dd331ad4691ba018: Running coprocessor pre-open hook at 1732381825737Writing region info on filesystem at 1732381825737Initializing all the Stores at 1732381825738 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381825738Cleaning up temporary data from old regions at 1732381825742 (+4 ms)Running coprocessor post-open hooks at 1732381825745 (+3 ms)Region opened successfully at 1732381825746 (+1 ms) 2024-11-23T17:10:25,746 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,1,1732381825401.de3ad9c9965ac0ad50fdb7f1172460e5., pid=216, masterSystemTime=1732381825733 2024-11-23T17:10:25,746 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,,1732381825401.afb0f86b4bfaa9a4dd331ad4691ba018., pid=217, masterSystemTime=1732381825734 2024-11-23T17:10:25,748 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,1,1732381825401.de3ad9c9965ac0ad50fdb7f1172460e5. 2024-11-23T17:10:25,748 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,1,1732381825401.de3ad9c9965ac0ad50fdb7f1172460e5. 
2024-11-23T17:10:25,748 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=215 updating hbase:meta row=de3ad9c9965ac0ad50fdb7f1172460e5, regionState=OPEN, openSeqNum=2, regionLocation=1a7c5470b472,43453,1732381534825 2024-11-23T17:10:25,748 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,,1732381825401.afb0f86b4bfaa9a4dd331ad4691ba018. 2024-11-23T17:10:25,748 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,,1732381825401.afb0f86b4bfaa9a4dd331ad4691ba018. 2024-11-23T17:10:25,749 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=214 updating hbase:meta row=afb0f86b4bfaa9a4dd331ad4691ba018, regionState=OPEN, openSeqNum=2, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:10:25,750 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=216, ppid=215, state=RUNNABLE, hasLock=false; OpenRegionProcedure de3ad9c9965ac0ad50fdb7f1172460e5, server=1a7c5470b472,43453,1732381534825 because future has completed 2024-11-23T17:10:25,750 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=217, ppid=214, state=RUNNABLE, hasLock=false; OpenRegionProcedure afb0f86b4bfaa9a4dd331ad4691ba018, server=1a7c5470b472,37875,1732381534675 because future has completed 2024-11-23T17:10:25,752 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=216, resume processing ppid=215 2024-11-23T17:10:25,752 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=216, ppid=215, state=SUCCESS, hasLock=false; OpenRegionProcedure de3ad9c9965ac0ad50fdb7f1172460e5, server=1a7c5470b472,43453,1732381534825 in 169 msec 2024-11-23T17:10:25,753 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=217, resume processing ppid=214 2024-11-23T17:10:25,753 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=217, ppid=214, state=SUCCESS, hasLock=false; OpenRegionProcedure afb0f86b4bfaa9a4dd331ad4691ba018, server=1a7c5470b472,37875,1732381534675 in 169 msec 2024-11-23T17:10:25,753 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=215, ppid=213, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=de3ad9c9965ac0ad50fdb7f1172460e5, ASSIGN in 325 msec 2024-11-23T17:10:25,754 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=214, resume processing ppid=213 2024-11-23T17:10:25,754 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=214, ppid=213, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=afb0f86b4bfaa9a4dd331ad4691ba018, ASSIGN in 326 msec 2024-11-23T17:10:25,755 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T17:10:25,755 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381825755"}]},"ts":"1732381825755"} 2024-11-23T17:10:25,756 INFO 
[PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-11-23T17:10:25,756 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T17:10:25,757 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-11-23T17:10:25,759 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37875 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-11-23T17:10:25,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:10:25,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:10:25,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:10:25,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:10:25,764 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:25,764 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:25,764 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:25,764 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:25,764 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:25,765 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:25,765 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:25,765 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-23T17:10:25,765 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=213, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum in 362 msec 2024-11-23T17:10:26,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-11-23T17:10:26,028 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-23T17:10:26,028 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-23T17:10:26,030 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-11-23T17:10:26,030 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1732381825401.afb0f86b4bfaa9a4dd331ad4691ba018. 2024-11-23T17:10:26,030 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:10:26,032 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-23T17:10:26,035 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-23T17:10:26,039 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-23T17:10:26,041 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-23T17:10:26,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732381826041 (current time:1732381826041). 
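[editor's note] The master has just received "snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }". As a hedged sketch of the client call that issues such a FLUSH-type snapshot request, assuming the standard Admin API of this HBase version (class name and configuration source are illustrative):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeEmptySnapshot {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // FLUSH-type snapshot, mirroring the "type=FLUSH ttl=0" request in the log;
          // the call blocks until the SnapshotProcedure on the master completes.
          admin.snapshot("emptySnaptb0-testExportWithChecksum",
              TableName.valueOf("testtb-testExportWithChecksum"),
              SnapshotType.FLUSH);
        }
      }
    }
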
2024-11-23T17:10:26,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:10:26,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-11-23T17:10:26,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:10:26,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@160ce1b5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:26,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:10:26,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:10:26,042 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:10:26,043 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:10:26,043 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:10:26,043 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@fa880f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:26,043 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:10:26,043 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:10:26,043 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:26,044 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39270, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:10:26,044 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29338a3e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:26,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:10:26,045 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:10:26,045 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:10:26,046 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34510, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:10:26,047 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159. 2024-11-23T17:10:26,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:10:26,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:26,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:26,047 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T17:10:26,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4091ccf7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:26,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:10:26,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:10:26,048 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:10:26,048 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:10:26,048 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:10:26,048 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d1568cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:26,049 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:10:26,049 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:10:26,049 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:26,049 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39288, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:10:26,050 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@109db53, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:26,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:10:26,051 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:10:26,051 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:10:26,051 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34514, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-23T17:10:26,053 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce., hostname=1a7c5470b472,37875,1732381534675, seqNum=2] 2024-11-23T17:10:26,054 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159. 2024-11-23T17:10:26,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor224.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:10:26,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:26,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:26,054 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T17:10:26,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-11-23T17:10:26,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T17:10:26,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=218, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-23T17:10:26,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 218 2024-11-23T17:10:26,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-11-23T17:10:26,056 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:10:26,058 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:10:26,060 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:10:26,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742326_1502 (size=161) 2024-11-23T17:10:26,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742326_1502 (size=161) 2024-11-23T17:10:26,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742326_1502 (size=161) 2024-11-23T17:10:26,066 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:10:26,066 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure afb0f86b4bfaa9a4dd331ad4691ba018}, {pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure de3ad9c9965ac0ad50fdb7f1172460e5}] 2024-11-23T17:10:26,067 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=219, 
ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure afb0f86b4bfaa9a4dd331ad4691ba018 2024-11-23T17:10:26,067 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure de3ad9c9965ac0ad50fdb7f1172460e5 2024-11-23T17:10:26,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-11-23T17:10:26,180 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-11-23T17:10:26,218 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43453 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=220 2024-11-23T17:10:26,218 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37875 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=219 2024-11-23T17:10:26,218 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1732381825401.afb0f86b4bfaa9a4dd331ad4691ba018. 2024-11-23T17:10:26,218 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1732381825401.de3ad9c9965ac0ad50fdb7f1172460e5. 2024-11-23T17:10:26,219 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.HRegion(2603): Flush status journal for de3ad9c9965ac0ad50fdb7f1172460e5: 2024-11-23T17:10:26,219 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.HRegion(2603): Flush status journal for afb0f86b4bfaa9a4dd331ad4691ba018: 2024-11-23T17:10:26,219 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1732381825401.de3ad9c9965ac0ad50fdb7f1172460e5. for emptySnaptb0-testExportWithChecksum completed. 2024-11-23T17:10:26,219 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1732381825401.afb0f86b4bfaa9a4dd331ad4691ba018. for emptySnaptb0-testExportWithChecksum completed. 2024-11-23T17:10:26,219 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1732381825401.de3ad9c9965ac0ad50fdb7f1172460e5.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-11-23T17:10:26,219 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1732381825401.afb0f86b4bfaa9a4dd331ad4691ba018.' 
region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-11-23T17:10:26,219 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:10:26,219 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:10:26,219 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:10:26,219 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:10:26,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742327_1503 (size=68) 2024-11-23T17:10:26,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742327_1503 (size=68) 2024-11-23T17:10:26,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742328_1504 (size=68) 2024-11-23T17:10:26,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742327_1503 (size=68) 2024-11-23T17:10:26,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742328_1504 (size=68) 2024-11-23T17:10:26,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742328_1504 (size=68) 2024-11-23T17:10:26,231 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1732381825401.afb0f86b4bfaa9a4dd331ad4691ba018. 2024-11-23T17:10:26,232 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=219 2024-11-23T17:10:26,232 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1732381825401.de3ad9c9965ac0ad50fdb7f1172460e5. 
2024-11-23T17:10:26,232 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=220 2024-11-23T17:10:26,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=219 2024-11-23T17:10:26,232 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region afb0f86b4bfaa9a4dd331ad4691ba018 2024-11-23T17:10:26,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=220 2024-11-23T17:10:26,232 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region de3ad9c9965ac0ad50fdb7f1172460e5 2024-11-23T17:10:26,232 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure afb0f86b4bfaa9a4dd331ad4691ba018 2024-11-23T17:10:26,232 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure de3ad9c9965ac0ad50fdb7f1172460e5 2024-11-23T17:10:26,234 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=220, ppid=218, state=SUCCESS, hasLock=false; SnapshotRegionProcedure de3ad9c9965ac0ad50fdb7f1172460e5 in 167 msec 2024-11-23T17:10:26,235 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=219, resume processing ppid=218 2024-11-23T17:10:26,235 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=219, ppid=218, state=SUCCESS, hasLock=false; SnapshotRegionProcedure afb0f86b4bfaa9a4dd331ad4691ba018 in 167 msec 2024-11-23T17:10:26,235 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:10:26,236 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:10:26,237 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-23T17:10:26,237 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-23T17:10:26,237 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:10:26,237 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-23T17:10:26,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742329_1505 (size=60) 2024-11-23T17:10:26,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742329_1505 (size=60) 2024-11-23T17:10:26,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742329_1505 (size=60) 2024-11-23T17:10:26,244 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:10:26,244 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-11-23T17:10:26,244 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-11-23T17:10:26,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742330_1506 (size=641) 2024-11-23T17:10:26,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742330_1506 (size=641) 2024-11-23T17:10:26,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742330_1506 (size=641) 2024-11-23T17:10:26,258 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:10:26,262 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:10:26,262 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-11-23T17:10:26,263 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_POST_OPERATION, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:10:26,263 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 218 2024-11-23T17:10:26,264 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=218, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 208 msec 2024-11-23T17:10:26,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-11-23T17:10:26,378 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-11-23T17:10:26,383 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37875 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,,1732381825401.afb0f86b4bfaa9a4dd331ad4691ba018. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:10:26,385 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43453 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,1,1732381825401.de3ad9c9965ac0ad50fdb7f1172460e5. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:10:26,386 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-23T17:10:26,388 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-11-23T17:10:26,388 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1732381825401.afb0f86b4bfaa9a4dd331ad4691ba018. 
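[editor's note] The region servers above warn "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." A minimal sketch of how a client write ends up skipping the WAL, assuming the standard Table/Put API; the row key, qualifier, and value are hypothetical placeholders, only the durability setting is the point:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteWithoutWal {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("testtb-testExportWithChecksum"))) {
          Put put = new Put(Bytes.toBytes("row-0"));
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          // SKIP_WAL writes go only to the memstore, which is why the server logs
          // that data may be lost if it crashes before the next flush.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }
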
2024-11-23T17:10:26,388 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:10:26,390 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-23T17:10:26,394 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-23T17:10:26,399 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-23T17:10:26,401 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-23T17:10:26,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732381826401 (current time:1732381826401). 2024-11-23T17:10:26,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:10:26,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-11-23T17:10:26,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:10:26,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c434c6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:26,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:10:26,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:10:26,403 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:10:26,403 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:10:26,403 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:10:26,403 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2394beaa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
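MasterRpcServices(1763) above records "snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }". A hedged sketch of the Admin call that issues such a FLUSH-type snapshot request; everything except the snapshot and table names is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotRequestExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Requests a FLUSH snapshot; the master fills in creation time, TTL and version
      // defaults, as logged by SnapshotDescriptionUtils above.
      admin.snapshot("snaptb0-testExportWithChecksum",
          TableName.valueOf("testtb-testExportWithChecksum"),
          SnapshotType.FLUSH);
    }
  }
}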
2024-11-23T17:10:26,403 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:10:26,403 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:10:26,403 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:26,404 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39306, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:10:26,405 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5be7e845, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:26,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:10:26,405 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:10:26,406 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:10:26,406 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34524, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:10:26,407 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159. 
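The ClusterIdFetcher / ConnectionUtils entries above trace the standard client bootstrap: ask the connection registry for the cluster id, then fetch the hbase:meta region location. A sketch, assuming nothing beyond a default client Configuration, of a connection that performs the same bootstrap and reads the cluster id back:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterIdExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Creating the connection performs the registry lookup traced above:
    // fetch the cluster id, then locate hbase:meta.
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      String clusterId = admin.getClusterMetrics().getClusterId();
      System.out.println("cluster id: " + clusterId);
    }
  }
}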
2024-11-23T17:10:26,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:10:26,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:26,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:26,408 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:10:26,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b15de1e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:26,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:10:26,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:10:26,409 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:10:26,409 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:10:26,409 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:10:26,409 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b6ce94f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:26,409 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:10:26,410 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:10:26,410 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:26,410 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39326, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:10:26,411 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3477e0cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:10:26,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:10:26,412 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1] 2024-11-23T17:10:26,412 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:10:26,413 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34540, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:10:26,414 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce., hostname=1a7c5470b472,37875,1732381534675, seqNum=2] 2024-11-23T17:10:26,415 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159. 
2024-11-23T17:10:26,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor224.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:10:26,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:26,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:10:26,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-11-23T17:10:26,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T17:10:26,417 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
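PermissionStorage(613) above shows the master reading the table ACL ("entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA]") so it can be recorded with the snapshot description. A hedged sketch of inspecting the same permissions from a client, assuming the AccessController coprocessor is installed as it is in this test cluster:

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class TableAclExample {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // Reads the same hbase:acl entries the master consulted above,
      // e.g. "jenkins: RWXCA" for testtb-testExportWithChecksum.
      List<UserPermission> perms =
          AccessControlClient.getUserPermissions(conn, "testtb-testExportWithChecksum");
      perms.forEach(System.out::println);
    }
  }
}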
2024-11-23T17:10:26,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=221, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-23T17:10:26,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 221 2024-11-23T17:10:26,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-11-23T17:10:26,420 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:10:26,421 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:10:26,423 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:10:26,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742331_1507 (size=156) 2024-11-23T17:10:26,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742331_1507 (size=156) 2024-11-23T17:10:26,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742331_1507 (size=156) 2024-11-23T17:10:26,433 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:10:26,433 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure afb0f86b4bfaa9a4dd331ad4691ba018}, {pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure de3ad9c9965ac0ad50fdb7f1172460e5}] 2024-11-23T17:10:26,434 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure de3ad9c9965ac0ad50fdb7f1172460e5 2024-11-23T17:10:26,434 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure afb0f86b4bfaa9a4dd331ad4691ba018 2024-11-23T17:10:26,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 
{}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-11-23T17:10:26,586 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37875 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=222 2024-11-23T17:10:26,586 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43453 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=223 2024-11-23T17:10:26,586 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1732381825401.afb0f86b4bfaa9a4dd331ad4691ba018. 2024-11-23T17:10:26,586 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1732381825401.de3ad9c9965ac0ad50fdb7f1172460e5. 2024-11-23T17:10:26,586 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(2902): Flushing afb0f86b4bfaa9a4dd331ad4691ba018 1/1 column families, dataSize=400 B heapSize=1.09 KB 2024-11-23T17:10:26,586 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(2902): Flushing de3ad9c9965ac0ad50fdb7f1172460e5 1/1 column families, dataSize=2.87 KB heapSize=6.44 KB 2024-11-23T17:10:26,604 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112360d553dbc0324de9a88be71f8726a94f_afb0f86b4bfaa9a4dd331ad4691ba018 is 71, key is 010069931880c5da566b5f63d1452a23/cf:q/1732381826383/Put/seqid=0 2024-11-23T17:10:26,605 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241123a88ef59888394c0294d0898a4610fd81_de3ad9c9965ac0ad50fdb7f1172460e5 is 71, key is 1bccd6ca3429507facba096f8e686716/cf:q/1732381826385/Put/seqid=0 2024-11-23T17:10:26,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742332_1508 (size=5311) 2024-11-23T17:10:26,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742332_1508 (size=5311) 2024-11-23T17:10:26,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742332_1508 (size=5311) 2024-11-23T17:10:26,610 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:10:26,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742333_1509 (size=7962) 2024-11-23T17:10:26,613 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742333_1509 (size=7962) 2024-11-23T17:10:26,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742333_1509 (size=7962) 2024-11-23T17:10:26,613 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:10:26,615 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112360d553dbc0324de9a88be71f8726a94f_afb0f86b4bfaa9a4dd331ad4691ba018 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024112360d553dbc0324de9a88be71f8726a94f_afb0f86b4bfaa9a4dd331ad4691ba018 2024-11-23T17:10:26,615 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/afb0f86b4bfaa9a4dd331ad4691ba018/.tmp/cf/30c5874a0c0a443fa9ed199e91c5784c, store: [table=testtb-testExportWithChecksum family=cf region=afb0f86b4bfaa9a4dd331ad4691ba018] 2024-11-23T17:10:26,616 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/afb0f86b4bfaa9a4dd331ad4691ba018/.tmp/cf/30c5874a0c0a443fa9ed199e91c5784c is 206, key is 02d8f62982e4822cf55f2980a22646fd2/cf:q/1732381826383/Put/seqid=0 2024-11-23T17:10:26,617 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241123a88ef59888394c0294d0898a4610fd81_de3ad9c9965ac0ad50fdb7f1172460e5 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241123a88ef59888394c0294d0898a4610fd81_de3ad9c9965ac0ad50fdb7f1172460e5 2024-11-23T17:10:26,618 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/de3ad9c9965ac0ad50fdb7f1172460e5/.tmp/cf/01fce57c971b44ae8ef5b3e395999fde, store: [table=testtb-testExportWithChecksum family=cf region=de3ad9c9965ac0ad50fdb7f1172460e5] 2024-11-23T17:10:26,618 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/de3ad9c9965ac0ad50fdb7f1172460e5/.tmp/cf/01fce57c971b44ae8ef5b3e395999fde is 206, key is 1364e692a7a413919989c54720a4eb3b8/cf:q/1732381826385/Put/seqid=0 2024-11-23T17:10:26,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742334_1510 (size=6510) 2024-11-23T17:10:26,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742334_1510 (size=6510) 2024-11-23T17:10:26,627 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=400, hasBloomFilter=true, into tmp file hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/afb0f86b4bfaa9a4dd331ad4691ba018/.tmp/cf/30c5874a0c0a443fa9ed199e91c5784c 2024-11-23T17:10:26,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742334_1510 (size=6510) 2024-11-23T17:10:26,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742335_1511 (size=14249) 2024-11-23T17:10:26,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742335_1511 (size=14249) 2024-11-23T17:10:26,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742335_1511 (size=14249) 2024-11-23T17:10:26,631 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/de3ad9c9965ac0ad50fdb7f1172460e5/.tmp/cf/01fce57c971b44ae8ef5b3e395999fde 2024-11-23T17:10:26,632 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/afb0f86b4bfaa9a4dd331ad4691ba018/.tmp/cf/30c5874a0c0a443fa9ed199e91c5784c as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/afb0f86b4bfaa9a4dd331ad4691ba018/cf/30c5874a0c0a443fa9ed199e91c5784c 2024-11-23T17:10:26,636 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/de3ad9c9965ac0ad50fdb7f1172460e5/.tmp/cf/01fce57c971b44ae8ef5b3e395999fde as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/de3ad9c9965ac0ad50fdb7f1172460e5/cf/01fce57c971b44ae8ef5b3e395999fde 2024-11-23T17:10:26,641 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/afb0f86b4bfaa9a4dd331ad4691ba018/cf/30c5874a0c0a443fa9ed199e91c5784c, entries=6, sequenceid=6, filesize=6.4 K 2024-11-23T17:10:26,641 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/de3ad9c9965ac0ad50fdb7f1172460e5/cf/01fce57c971b44ae8ef5b3e395999fde, entries=44, sequenceid=6, filesize=13.9 K 2024-11-23T17:10:26,642 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(3140): Finished flush of dataSize ~400 B/400, heapSize ~1.08 KB/1104, currentSize=0 B/0 for afb0f86b4bfaa9a4dd331ad4691ba018 in 56ms, sequenceid=6, compaction requested=false 2024-11-23T17:10:26,642 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(3140): Finished flush of dataSize ~2.87 KB/2936, heapSize ~6.42 KB/6576, currentSize=0 B/0 for de3ad9c9965ac0ad50fdb7f1172460e5 in 56ms, sequenceid=6, compaction requested=false 2024-11-23T17:10:26,642 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(2603): Flush status journal for afb0f86b4bfaa9a4dd331ad4691ba018: 2024-11-23T17:10:26,642 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(2603): Flush status journal for de3ad9c9965ac0ad50fdb7f1172460e5: 2024-11-23T17:10:26,642 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1732381825401.afb0f86b4bfaa9a4dd331ad4691ba018. for snaptb0-testExportWithChecksum completed. 2024-11-23T17:10:26,642 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1732381825401.de3ad9c9965ac0ad50fdb7f1172460e5. for snaptb0-testExportWithChecksum completed. 2024-11-23T17:10:26,642 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1732381825401.de3ad9c9965ac0ad50fdb7f1172460e5.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-11-23T17:10:26,642 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:10:26,642 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1732381825401.afb0f86b4bfaa9a4dd331ad4691ba018.' 
region-info for snapshot=snaptb0-testExportWithChecksum 2024-11-23T17:10:26,642 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/de3ad9c9965ac0ad50fdb7f1172460e5/cf/01fce57c971b44ae8ef5b3e395999fde] hfiles 2024-11-23T17:10:26,642 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/de3ad9c9965ac0ad50fdb7f1172460e5/cf/01fce57c971b44ae8ef5b3e395999fde for snapshot=snaptb0-testExportWithChecksum 2024-11-23T17:10:26,642 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:10:26,642 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/afb0f86b4bfaa9a4dd331ad4691ba018/cf/30c5874a0c0a443fa9ed199e91c5784c] hfiles 2024-11-23T17:10:26,642 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/afb0f86b4bfaa9a4dd331ad4691ba018/cf/30c5874a0c0a443fa9ed199e91c5784c for snapshot=snaptb0-testExportWithChecksum 2024-11-23T17:10:26,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742337_1513 (size=107) 2024-11-23T17:10:26,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742337_1513 (size=107) 2024-11-23T17:10:26,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742337_1513 (size=107) 2024-11-23T17:10:26,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742336_1512 (size=107) 2024-11-23T17:10:26,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742336_1512 (size=107) 2024-11-23T17:10:26,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742336_1512 (size=107) 2024-11-23T17:10:26,669 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1732381825401.afb0f86b4bfaa9a4dd331ad4691ba018. 
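The SnapshotManifest(241)/(265) entries above store region-info and one reference per hfile for each region; once the consolidate, verify and complete states further down have run, the snapshot becomes visible to clients. A sketch of confirming that afterwards; only the snapshot name is taken from the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Completed snapshots appear here once SNAPSHOT_COMPLETE_SNAPSHOT has moved the
      // working directory out of .hbase-snapshot/.tmp, as logged below.
      for (SnapshotDescription sd : admin.listSnapshots()) {
        if ("snaptb0-testExportWithChecksum".equals(sd.getName())) {
          System.out.println("snapshot ready: " + sd);
        }
      }
    }
  }
}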
2024-11-23T17:10:26,669 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=222 2024-11-23T17:10:26,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=222 2024-11-23T17:10:26,669 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region afb0f86b4bfaa9a4dd331ad4691ba018 2024-11-23T17:10:26,670 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure afb0f86b4bfaa9a4dd331ad4691ba018 2024-11-23T17:10:26,672 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=222, ppid=221, state=SUCCESS, hasLock=false; SnapshotRegionProcedure afb0f86b4bfaa9a4dd331ad4691ba018 in 238 msec 2024-11-23T17:10:26,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-11-23T17:10:27,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-11-23T17:10:27,061 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1732381825401.de3ad9c9965ac0ad50fdb7f1172460e5. 2024-11-23T17:10:27,062 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=223 2024-11-23T17:10:27,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=223 2024-11-23T17:10:27,062 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region de3ad9c9965ac0ad50fdb7f1172460e5 2024-11-23T17:10:27,063 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure de3ad9c9965ac0ad50fdb7f1172460e5 2024-11-23T17:10:27,069 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:10:27,070 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:10:27,071 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=223, resume processing ppid=221 2024-11-23T17:10:27,071 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=223, ppid=221, state=SUCCESS, hasLock=false; SnapshotRegionProcedure de3ad9c9965ac0ad50fdb7f1172460e5 in 633 msec 2024-11-23T17:10:27,071 DEBUG [MobRegionSnapshotPool-pool-0 {}] 
snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-11-23T17:10:27,071 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-23T17:10:27,071 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:10:27,074 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241123a88ef59888394c0294d0898a4610fd81_de3ad9c9965ac0ad50fdb7f1172460e5, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024112360d553dbc0324de9a88be71f8726a94f_afb0f86b4bfaa9a4dd331ad4691ba018] hfiles 2024-11-23T17:10:27,074 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241123a88ef59888394c0294d0898a4610fd81_de3ad9c9965ac0ad50fdb7f1172460e5 2024-11-23T17:10:27,074 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024112360d553dbc0324de9a88be71f8726a94f_afb0f86b4bfaa9a4dd331ad4691ba018 2024-11-23T17:10:27,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742338_1514 (size=291) 2024-11-23T17:10:27,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742338_1514 (size=291) 2024-11-23T17:10:27,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742338_1514 (size=291) 2024-11-23T17:10:27,089 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:10:27,089 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum 2024-11-23T17:10:27,090 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-23T17:10:27,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742339_1515 (size=951) 2024-11-23T17:10:27,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742339_1515 (size=951) 
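The mobdir paths, the HMobStore flush renames and the MobRegionSnapshotPool manifest entries above appear because the 'cf' family of this test table is MOB-enabled: each flush writes both a regular store file and a mob file, and the snapshot references both. A hedged sketch of how such a family might be declared at table-creation time; the threshold value is illustrative and not taken from this test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Cells larger than the MOB threshold are written to mobdir/... files like the
      // ones referenced by the mob-region snapshot manifest above.
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportWithChecksum"))  // names reused for illustration
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)
              .setMobThreshold(1024L)   // illustrative threshold, not from this test
              .build())
          .build());
    }
  }
}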
2024-11-23T17:10:27,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742339_1515 (size=951) 2024-11-23T17:10:27,129 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:10:27,142 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:10:27,142 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-23T17:10:27,144 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:10:27,144 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 221 2024-11-23T17:10:27,146 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=221, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 727 msec 2024-11-23T17:10:27,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-11-23T17:10:27,558 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-11-23T17:10:27,558 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381827558 2024-11-23T17:10:27,558 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381827558, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381827558, srcFsUri=hdfs://localhost:35431, srcDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:10:27,601 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:35431, 
inputRoot=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:10:27,601 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@2b2fb4ec, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381827558, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381827558/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-23T17:10:27,603 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-23T17:10:27,609 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381827558/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-23T17:10:27,654 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:10:27,654 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:10:27,655 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:10:29,042 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop-2593377002969548044.jar 2024-11-23T17:10:29,043 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:10:29,043 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:10:29,124 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop-5039789822688095458.jar 2024-11-23T17:10:29,124 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:10:29,124 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:10:29,125 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:10:29,125 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:10:29,125 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:10:29,125 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:10:29,125 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-23T17:10:29,126 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-23T17:10:29,126 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-23T17:10:29,126 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-23T17:10:29,126 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-23T17:10:29,127 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-23T17:10:29,127 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-23T17:10:29,127 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-23T17:10:29,127 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-23T17:10:29,128 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-23T17:10:29,128 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-23T17:10:29,128 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:10:29,128 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:10:29,128 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:10:29,129 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:10:29,129 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:10:29,129 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:10:29,129 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:10:29,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742340_1516 (size=440956) 2024-11-23T17:10:29,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742340_1516 (size=440956) 2024-11-23T17:10:29,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742340_1516 (size=440956) 2024-11-23T17:10:29,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742341_1517 (size=24020) 2024-11-23T17:10:29,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742341_1517 (size=24020) 2024-11-23T17:10:29,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742341_1517 (size=24020) 2024-11-23T17:10:29,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742342_1518 (size=77755) 2024-11-23T17:10:29,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742342_1518 (size=77755) 2024-11-23T17:10:29,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742342_1518 (size=77755) 2024-11-23T17:10:29,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742343_1519 (size=131360) 2024-11-23T17:10:29,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742343_1519 (size=131360) 2024-11-23T17:10:29,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742343_1519 (size=131360) 2024-11-23T17:10:29,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742344_1520 (size=111793) 2024-11-23T17:10:29,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742344_1520 (size=111793) 2024-11-23T17:10:29,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742344_1520 (size=111793) 2024-11-23T17:10:29,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742345_1521 (size=1832290) 2024-11-23T17:10:29,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742345_1521 (size=1832290) 2024-11-23T17:10:29,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742345_1521 (size=1832290) 2024-11-23T17:10:29,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742346_1522 (size=8360005) 
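From ExportSnapshot(1094) onward the test exports snaptb0-testExportWithChecksum to a local file:/// destination; the TableMapReduceUtil(972) entries and the jar-sized addStoredBlock entries above appear to be the tool shipping its MapReduce dependency jars to the mini cluster. A hedged sketch of driving the same tool programmatically; the destination path and mapper count are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Roughly equivalent command line:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //     -snapshot snaptb0-testExportWithChecksum -copy-to file:///tmp/local-export
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithChecksum",
        "-copy-to", "file:///tmp/local-export",   // illustrative destination
        "-mappers", "2"
    });
    System.exit(rc);
  }
}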
2024-11-23T17:10:29,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742346_1522 (size=8360005) 2024-11-23T17:10:29,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742346_1522 (size=8360005) 2024-11-23T17:10:29,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742347_1523 (size=503880) 2024-11-23T17:10:29,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742347_1523 (size=503880) 2024-11-23T17:10:29,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742347_1523 (size=503880) 2024-11-23T17:10:29,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742348_1524 (size=322274) 2024-11-23T17:10:29,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742348_1524 (size=322274) 2024-11-23T17:10:29,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742348_1524 (size=322274) 2024-11-23T17:10:29,535 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0008_000001 (auth:SIMPLE) from 127.0.0.1:41156 2024-11-23T17:10:29,555 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_0/usercache/jenkins/appcache/application_1732381541272_0008/container_1732381541272_0008_01_000001/launch_container.sh] 2024-11-23T17:10:29,555 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_0/usercache/jenkins/appcache/application_1732381541272_0008/container_1732381541272_0008_01_000001/container_tokens] 2024-11-23T17:10:29,555 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_0/usercache/jenkins/appcache/application_1732381541272_0008/container_1732381541272_0008_01_000001/sysfs] 2024-11-23T17:10:29,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742349_1525 (size=20406) 2024-11-23T17:10:29,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742349_1525 (size=20406) 2024-11-23T17:10:29,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742349_1525 (size=20406) 2024-11-23T17:10:29,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to 
blk_1073742350_1526 (size=45609) 2024-11-23T17:10:29,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742350_1526 (size=45609) 2024-11-23T17:10:29,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742350_1526 (size=45609) 2024-11-23T17:10:29,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742351_1527 (size=136454) 2024-11-23T17:10:29,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742351_1527 (size=136454) 2024-11-23T17:10:29,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742351_1527 (size=136454) 2024-11-23T17:10:29,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742352_1528 (size=1597270) 2024-11-23T17:10:29,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742352_1528 (size=1597270) 2024-11-23T17:10:29,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742352_1528 (size=1597270) 2024-11-23T17:10:29,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742353_1529 (size=30873) 2024-11-23T17:10:29,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742353_1529 (size=30873) 2024-11-23T17:10:29,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742353_1529 (size=30873) 2024-11-23T17:10:29,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742354_1530 (size=29229) 2024-11-23T17:10:29,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742354_1530 (size=29229) 2024-11-23T17:10:29,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742354_1530 (size=29229) 2024-11-23T17:10:29,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742355_1531 (size=903661) 2024-11-23T17:10:29,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742355_1531 (size=903661) 2024-11-23T17:10:29,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742355_1531 (size=903661) 2024-11-23T17:10:30,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742356_1532 (size=6424742) 2024-11-23T17:10:30,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742356_1532 (size=6424742) 2024-11-23T17:10:30,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added 
to blk_1073742356_1532 (size=6424742) 2024-11-23T17:10:30,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742357_1533 (size=5175431) 2024-11-23T17:10:30,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742357_1533 (size=5175431) 2024-11-23T17:10:30,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742357_1533 (size=5175431) 2024-11-23T17:10:30,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742358_1534 (size=232881) 2024-11-23T17:10:30,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742358_1534 (size=232881) 2024-11-23T17:10:30,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742358_1534 (size=232881) 2024-11-23T17:10:30,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742359_1535 (size=1323991) 2024-11-23T17:10:30,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742359_1535 (size=1323991) 2024-11-23T17:10:30,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742359_1535 (size=1323991) 2024-11-23T17:10:30,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742360_1536 (size=4695811) 2024-11-23T17:10:30,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742360_1536 (size=4695811) 2024-11-23T17:10:30,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742360_1536 (size=4695811) 2024-11-23T17:10:30,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742361_1537 (size=1877034) 2024-11-23T17:10:30,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742361_1537 (size=1877034) 2024-11-23T17:10:30,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742361_1537 (size=1877034) 2024-11-23T17:10:30,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742362_1538 (size=217555) 2024-11-23T17:10:30,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742362_1538 (size=217555) 2024-11-23T17:10:30,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742362_1538 (size=217555) 2024-11-23T17:10:30,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742363_1539 (size=4188619) 2024-11-23T17:10:30,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:38951 is added to blk_1073742363_1539 (size=4188619) 2024-11-23T17:10:30,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742363_1539 (size=4188619) 2024-11-23T17:10:30,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742364_1540 (size=127628) 2024-11-23T17:10:30,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742364_1540 (size=127628) 2024-11-23T17:10:30,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742364_1540 (size=127628) 2024-11-23T17:10:30,157 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-23T17:10:30,159 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-11-23T17:10:30,162 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=13.9 K 2024-11-23T17:10:30,162 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.8 K 2024-11-23T17:10:30,162 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=6.4 K 2024-11-23T17:10:30,162 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.2 K 2024-11-23T17:10:30,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742365_1541 (size=1023) 2024-11-23T17:10:30,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742365_1541 (size=1023) 2024-11-23T17:10:30,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742365_1541 (size=1023) 2024-11-23T17:10:30,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742366_1542 (size=35) 2024-11-23T17:10:30,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742366_1542 (size=35) 2024-11-23T17:10:30,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742366_1542 (size=35) 2024-11-23T17:10:30,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742367_1543 (size=304123) 2024-11-23T17:10:30,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742367_1543 (size=304123) 2024-11-23T17:10:30,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742367_1543 (size=304123) 2024-11-23T17:10:30,210 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-23T17:10:30,210 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T17:10:30,528 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0009_000001 (auth:SIMPLE) from 127.0.0.1:41172 2024-11-23T17:10:30,597 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:10:33,179 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T17:10:34,340 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-11-23T17:10:34,340 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer 2024-11-23T17:10:34,341 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-11-23T17:10:34,794 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=919.14 KB, freeSize=879.10 MB, max=880 MB, blockCount=3, accesses=5, hits=2, hitRatio=40.00%, , cachingAccesses=5, cachingHits=2, cachingHitsRatio=40.00%, evictions=29, evicted=0, evictedPerRun=0.0 2024-11-23T17:10:34,858 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=30, evicted=0, evictedPerRun=0.0 2024-11-23T17:10:34,893 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=662.40 KB, freeSize=879.35 MB, max=880 MB, blockCount=2, accesses=2, hits=0, hitRatio=0, cachingAccesses=2, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-11-23T17:10:35,015 DEBUG [master/1a7c5470b472:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=2, created chunk count=10, reused chunk count=22, reuseRatio=68.75% 2024-11-23T17:10:35,015 DEBUG [master/1a7c5470b472:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-23T17:10:36,665 INFO [regionserver/1a7c5470b472:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-11-23T17:10:36,669 INFO [regionserver/1a7c5470b472:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-11-23T17:10:36,700 INFO 
[regionserver/1a7c5470b472:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-11-23T17:10:36,702 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0009_000001 (auth:SIMPLE) from 127.0.0.1:49268 2024-11-23T17:10:36,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742368_1544 (size=349821) 2024-11-23T17:10:36,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742368_1544 (size=349821) 2024-11-23T17:10:36,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742368_1544 (size=349821) 2024-11-23T17:10:37,607 DEBUG [master/1a7c5470b472:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 5c659a536d4a93accf1b401f39f6f967 changed from -1.0 to 0.0, refreshing cache 2024-11-23T17:10:37,608 DEBUG [master/1a7c5470b472:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region afb0f86b4bfaa9a4dd331ad4691ba018 changed from -1.0 to 0.0, refreshing cache 2024-11-23T17:10:37,608 DEBUG [master/1a7c5470b472:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region de3ad9c9965ac0ad50fdb7f1172460e5 changed from -1.0 to 0.0, refreshing cache 2024-11-23T17:10:37,608 DEBUG [master/1a7c5470b472:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region d701f2f82212e8f3b780ec9dae20e678 changed from -1.0 to 0.0, refreshing cache 2024-11-23T17:10:37,611 DEBUG [master/1a7c5470b472:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(138): Balancing RSGroup=default 2024-11-23T17:10:37,611 INFO [master/1a7c5470b472:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(151): Start Generate Balance plan for group: default 2024-11-23T17:10:37,611 DEBUG [master/1a7c5470b472:0.Chore.1 {}] balancer.BaseLoadBalancer(619): Start Generate Balance plan for cluster. 
2024-11-23T17:10:37,612 DEBUG [master/1a7c5470b472:0.Chore.1 {}] balancer.BalancerClusterState(204): Hosts are {1a7c5470b472=0} racks are {/default-rack=0} 2024-11-23T17:10:37,612 DEBUG [master/1a7c5470b472:0.Chore.1 {}] balancer.BalancerClusterState(303): server 0 has 4 regions 2024-11-23T17:10:37,612 DEBUG [master/1a7c5470b472:0.Chore.1 {}] balancer.BalancerClusterState(303): server 1 has 1 regions 2024-11-23T17:10:37,612 DEBUG [master/1a7c5470b472:0.Chore.1 {}] balancer.BalancerClusterState(303): server 2 has 1 regions 2024-11-23T17:10:37,612 DEBUG [master/1a7c5470b472:0.Chore.1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T17:10:37,612 DEBUG [master/1a7c5470b472:0.Chore.1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T17:10:37,612 DEBUG [master/1a7c5470b472:0.Chore.1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T17:10:37,612 INFO [master/1a7c5470b472:0.Chore.1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T17:10:37,612 INFO [master/1a7c5470b472:0.Chore.1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T17:10:37,613 INFO [master/1a7c5470b472:0.Chore.1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T17:10:37,613 DEBUG [master/1a7c5470b472:0.Chore.1 {}] balancer.BalancerClusterState(326): Number of tables=4, number of hosts=1, number of racks=1 2024-11-23T17:10:37,616 INFO [master/1a7c5470b472:0.Chore.1 {}] balancer.StochasticLoadBalancer(376): Running balancer because cluster has sloppy server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.5, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=1.0, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=1.0, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-23T17:10:37,616 INFO [master/1a7c5470b472:0.Chore.1 {}] balancer.StochasticLoadBalancer(515): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.42833607907743, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.5, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=1.0, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=1.0, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); computedMaxSteps=14400 2024-11-23T17:10:37,622 INFO [regionserver/1a7c5470b472:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of 
hbase:meta,,1.1588230740 because 1588230740/ns has an old edit so flush to free WALs after random delay 298286 ms 2024-11-23T17:10:37,802 INFO [master/1a7c5470b472:0.Chore.1 {}] balancer.StochasticLoadBalancer(562): Finished computing new moving plan. Computation took 188 ms to try 14400 different iterations. Found a solution that moves 2 regions; Going from a computed imbalance of 0.42833607907743 to a new imbalance of 0.017635723334273525. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.3333333333333333, need balance); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.8209636651948572, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.8533464809192823, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-23T17:10:37,806 INFO [master/1a7c5470b472:0.Chore.1 {}] master.HMaster(2167): Balancer plans size is 2, the balance interval is 150000 ms, and the max number regions in transition is 6 2024-11-23T17:10:37,806 INFO [master/1a7c5470b472:0.Chore.1 {}] master.HMaster(2172): balance hri=1588230740, source=1a7c5470b472,37875,1732381534675, destination=1a7c5470b472,44711,1732381534872 2024-11-23T17:10:37,808 DEBUG [master/1a7c5470b472:0.Chore.1 {}] procedure2.ProcedureExecutor(1139): Stored pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, REOPEN/MOVE 2024-11-23T17:10:37,808 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, REOPEN/MOVE 2024-11-23T17:10:37,809 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=1588230740, regionState=CLOSING, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:10:37,810 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1a7c5470b472,37875,1732381534675, state=CLOSING 2024-11-23T17:10:37,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T17:10:37,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T17:10:37,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T17:10:37,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T17:10:37,814 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T17:10:37,814 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T17:10:37,814 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T17:10:37,814 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T17:10:37,814 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, REOPEN/MOVE 2024-11-23T17:10:37,814 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:10:37,814 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=225, ppid=224, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1588230740, server=1a7c5470b472,37875,1732381534675}] 2024-11-23T17:10:37,969 INFO [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META, pid=225}] handler.UnassignRegionHandler(122): Close 1588230740 2024-11-23T17:10:37,969 DEBUG [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META, pid=225}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:10:37,969 DEBUG [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T17:10:37,969 INFO [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T17:10:37,969 DEBUG [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T17:10:37,969 DEBUG [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T17:10:37,969 DEBUG [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T17:10:37,969 INFO [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=76.94 KB heapSize=121.80 KB 2024-11-23T17:10:37,996 DEBUG [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META, pid=225}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/.tmp/info/9c9f0ba6770c4ef6a377c4057eed99dc is 181, key is testtb-testExportWithChecksum,1,1732381825401.de3ad9c9965ac0ad50fdb7f1172460e5./info:regioninfo/1732381825748/Put/seqid=0 2024-11-23T17:10:38,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:32895 is added to blk_1073742369_1545 (size=17550) 2024-11-23T17:10:38,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742369_1545 (size=17550) 2024-11-23T17:10:38,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742369_1545 (size=17550) 2024-11-23T17:10:38,004 INFO [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65.78 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/.tmp/info/9c9f0ba6770c4ef6a377c4057eed99dc 2024-11-23T17:10:38,032 DEBUG [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META, pid=225}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/.tmp/ns/5be69196a08a4aa2ac7be1605b9c3b4b is 124, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780646.101983e8b6b249eafb04f77c0ff5026a./ns:/1732381800067/DeleteFamily/seqid=0 2024-11-23T17:10:38,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742370_1546 (size=7924) 2024-11-23T17:10:38,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742370_1546 (size=7924) 2024-11-23T17:10:38,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742370_1546 (size=7924) 2024-11-23T17:10:38,040 INFO [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.37 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/.tmp/ns/5be69196a08a4aa2ac7be1605b9c3b4b 2024-11-23T17:10:38,059 DEBUG [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META, pid=225}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/.tmp/rep_barrier/a294ac240c73416ea5290ca936d95689 is 133, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780646.101983e8b6b249eafb04f77c0ff5026a./rep_barrier:/1732381800067/DeleteFamily/seqid=0 2024-11-23T17:10:38,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742371_1547 (size=8195) 2024-11-23T17:10:38,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742371_1547 (size=8195) 2024-11-23T17:10:38,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742371_1547 (size=8195) 2024-11-23T17:10:38,066 INFO [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.49 KB at sequenceid=211 (bloomFilter=true), 
to=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/.tmp/rep_barrier/a294ac240c73416ea5290ca936d95689 2024-11-23T17:10:38,086 DEBUG [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META, pid=225}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/.tmp/table/e64d6d7045e0401d991576750912c296 is 127, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1732381780646.101983e8b6b249eafb04f77c0ff5026a./table:/1732381800067/DeleteFamily/seqid=0 2024-11-23T17:10:38,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742372_1548 (size=9051) 2024-11-23T17:10:38,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742372_1548 (size=9051) 2024-11-23T17:10:38,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742372_1548 (size=9051) 2024-11-23T17:10:38,094 INFO [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.30 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/.tmp/table/e64d6d7045e0401d991576750912c296 2024-11-23T17:10:38,101 DEBUG [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/.tmp/info/9c9f0ba6770c4ef6a377c4057eed99dc as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/info/9c9f0ba6770c4ef6a377c4057eed99dc 2024-11-23T17:10:38,105 INFO [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/info/9c9f0ba6770c4ef6a377c4057eed99dc, entries=96, sequenceid=211, filesize=17.1 K 2024-11-23T17:10:38,106 DEBUG [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/.tmp/ns/5be69196a08a4aa2ac7be1605b9c3b4b as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/ns/5be69196a08a4aa2ac7be1605b9c3b4b 2024-11-23T17:10:38,113 INFO [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/ns/5be69196a08a4aa2ac7be1605b9c3b4b, entries=24, sequenceid=211, filesize=7.7 K 2024-11-23T17:10:38,114 DEBUG [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/.tmp/rep_barrier/a294ac240c73416ea5290ca936d95689 as 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/rep_barrier/a294ac240c73416ea5290ca936d95689 2024-11-23T17:10:38,118 INFO [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/rep_barrier/a294ac240c73416ea5290ca936d95689, entries=22, sequenceid=211, filesize=8.0 K 2024-11-23T17:10:38,119 DEBUG [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/.tmp/table/e64d6d7045e0401d991576750912c296 as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/table/e64d6d7045e0401d991576750912c296 2024-11-23T17:10:38,123 INFO [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/table/e64d6d7045e0401d991576750912c296, entries=39, sequenceid=211, filesize=8.8 K 2024-11-23T17:10:38,125 INFO [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(3140): Finished flush of dataSize ~76.94 KB/78789, heapSize ~121.74 KB/124664, currentSize=0 B/0 for 1588230740 in 155ms, sequenceid=211, compaction requested=false 2024-11-23T17:10:38,131 DEBUG [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META, pid=225}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/recovered.edits/214.seqid, newMaxSeqId=214, maxSeqId=1 2024-11-23T17:10:38,132 DEBUG [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META, pid=225}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:10:38,132 DEBUG [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META, pid=225}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T17:10:38,132 INFO [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T17:10:38,132 DEBUG [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732381837969Running coprocessor pre-close hooks at 1732381837969Disabling compacts and flushes for region at 1732381837969Disabling writes for close at 1732381837969Obtaining lock to block concurrent updates at 1732381837969Preparing flush snapshotting stores in 1588230740 at 1732381837969Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=78789, getHeapSize=124664, getOffHeapSize=0, getCellsCount=595 at 1732381837970 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732381837970Flushing 1588230740/info: creating writer at 1732381837970Flushing 1588230740/info: appending metadata at 1732381837995 (+25 ms)Flushing 1588230740/info: closing flushed file at 1732381837995Flushing 1588230740/ns: creating 
writer at 1732381838010 (+15 ms)Flushing 1588230740/ns: appending metadata at 1732381838031 (+21 ms)Flushing 1588230740/ns: closing flushed file at 1732381838031Flushing 1588230740/rep_barrier: creating writer at 1732381838045 (+14 ms)Flushing 1588230740/rep_barrier: appending metadata at 1732381838059 (+14 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1732381838059Flushing 1588230740/table: creating writer at 1732381838070 (+11 ms)Flushing 1588230740/table: appending metadata at 1732381838086 (+16 ms)Flushing 1588230740/table: closing flushed file at 1732381838086Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6999cacb: reopening flushed file at 1732381838098 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5d04c337: reopening flushed file at 1732381838106 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@35311c40: reopening flushed file at 1732381838113 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@a2827d4: reopening flushed file at 1732381838118 (+5 ms)Finished flush of dataSize ~76.94 KB/78789, heapSize ~121.74 KB/124664, currentSize=0 B/0 for 1588230740 in 155ms, sequenceid=211, compaction requested=false at 1732381838125 (+7 ms)Writing region close event to WAL at 1732381838128 (+3 ms)Running coprocessor post-close hooks at 1732381838132 (+4 ms)Closed at 1732381838132 2024-11-23T17:10:38,133 INFO [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META, pid=225}] regionserver.HRegionServer(3302): Adding 1588230740 move to 1a7c5470b472,44711,1732381534872 record at close sequenceid=211 2024-11-23T17:10:38,135 INFO [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META, pid=225}] handler.UnassignRegionHandler(157): Closed 1588230740 2024-11-23T17:10:38,135 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=1588230740, regionState=CLOSED 2024-11-23T17:10:38,136 WARN [PEWorker-2 {}] zookeeper.MetaTableLocator(168): Tried to set null ServerName in hbase:meta; skipping -- ServerName required 2024-11-23T17:10:38,136 DEBUG [PEWorker-2 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=225, ppid=224, state=RUNNABLE, hasLock=true; CloseRegionProcedure 1588230740, server=1a7c5470b472,37875,1732381534675 2024-11-23T17:10:38,138 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=225, resume processing ppid=224 2024-11-23T17:10:38,138 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=225, ppid=224, state=SUCCESS, hasLock=false; CloseRegionProcedure 1588230740, server=1a7c5470b472,37875,1732381534675 in 322 msec 2024-11-23T17:10:38,139 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, REOPEN/MOVE; state=CLOSED, location=1a7c5470b472,44711,1732381534872; forceNewPlan=false, retain=false 2024-11-23T17:10:38,289 INFO [1a7c5470b472:46159 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-23T17:10:38,290 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=1a7c5470b472,44711,1732381534872 2024-11-23T17:10:38,293 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1a7c5470b472,44711,1732381534872, state=OPENING 2024-11-23T17:10:38,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T17:10:38,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T17:10:38,294 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=224, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, REOPEN/MOVE 2024-11-23T17:10:38,294 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T17:10:38,294 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T17:10:38,294 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=226, ppid=224, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=1a7c5470b472,44711,1732381534872}] 2024-11-23T17:10:38,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T17:10:38,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T17:10:38,295 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T17:10:38,295 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T17:10:38,450 INFO [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=226}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-23T17:10:38,450 INFO [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=226}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-23T17:10:38,451 INFO [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=226}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-23T17:10:38,452 INFO [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=226}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1a7c5470b472%2C44711%2C1732381534872.meta, suffix=.meta, 
logDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/WALs/1a7c5470b472,44711,1732381534872, archiveDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/oldWALs, maxLogs=32 2024-11-23T17:10:38,468 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=226}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/WALs/1a7c5470b472,44711,1732381534872/1a7c5470b472%2C44711%2C1732381534872.meta.1732381838453.meta, exclude list is [], retry=0 2024-11-23T17:10:38,471 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38951,DS-162d84dd-1d36-456f-980d-af608c2736b0,DISK] 2024-11-23T17:10:38,471 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44705,DS-060ddb2c-4779-4744-a2e0-1bc04f2e067d,DISK] 2024-11-23T17:10:38,472 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32895,DS-7c68cd94-8a4b-4f5f-8d5b-d907b4cb7b32,DISK] 2024-11-23T17:10:38,483 INFO [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=226}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/WALs/1a7c5470b472,44711,1732381534872/1a7c5470b472%2C44711%2C1732381534872.meta.1732381838453.meta 2024-11-23T17:10:38,484 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=226}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40223:40223),(127.0.0.1/127.0.0.1:35159:35159),(127.0.0.1/127.0.0.1:46009:46009)] 2024-11-23T17:10:38,484 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-23T17:10:38,485 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-11-23T17:10:38,485 INFO [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=226}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T17:10:38,485 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=226}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-23T17:10:38,485 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-23T17:10:38,485 INFO [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-23T17:10:38,485 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-23T17:10:38,485 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:10:38,486 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-23T17:10:38,486 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-23T17:10:38,487 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T17:10:38,488 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T17:10:38,488 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:10:38,495 DEBUG [StoreOpener-1588230740-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/info/9c9f0ba6770c4ef6a377c4057eed99dc 2024-11-23T17:10:38,495 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T17:10:38,495 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T17:10:38,496 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T17:10:38,496 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:10:38,501 DEBUG [StoreOpener-1588230740-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/ns/5be69196a08a4aa2ac7be1605b9c3b4b 2024-11-23T17:10:38,501 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T17:10:38,501 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T17:10:38,501 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T17:10:38,501 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:10:38,520 DEBUG [StoreOpener-1588230740-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/rep_barrier/a294ac240c73416ea5290ca936d95689 2024-11-23T17:10:38,520 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T17:10:38,521 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T17:10:38,521 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T17:10:38,522 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:10:38,528 DEBUG [StoreOpener-1588230740-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/table/e64d6d7045e0401d991576750912c296 2024-11-23T17:10:38,528 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T17:10:38,528 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T17:10:38,529 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740 2024-11-23T17:10:38,530 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740 2024-11-23T17:10:38,531 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T17:10:38,531 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T17:10:38,531 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
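The FlushLargeStoresPolicy entry above describes the fallback: with no hbase.hregion.percolumnfamilyflush.size.lower.bound in the hbase:meta table descriptor, the per-family flush lower bound becomes the region memstore flush size divided by the number of column families (four here: info, ns, rep_barrier, table), which gives the 32.0 M in the message. A minimal sketch of that arithmetic, and of pinning the bound explicitly as a table attribute, is below; the 128 MB flush size and the TableDescriptorBuilder usage are assumptions based on HBase defaults and public API, not taken from this log.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class PerFamilyFlushBoundSketch {
  public static void main(String[] args) {
    long memstoreFlushSize = 128L * 1024 * 1024;  // hbase.hregion.memstore.flush.size (assumed default)
    int families = 4;                             // hbase:meta stores: info, ns, rep_barrier, table
    long fallback = memstoreFlushSize / families; // 33554432 bytes = the "32.0 M" in the log line

    // Hypothetical: pin the bound explicitly instead of relying on the fallback.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("someTable")) // placeholder table name
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound", Long.toString(fallback))
        .build();
    System.out.println(fallback + " bytes; descriptor attribute = "
        + td.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
  }
}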
2024-11-23T17:10:38,532 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T17:10:38,533 INFO [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=215; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67014526, jitterRate=-0.0014057457447052002}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-23T17:10:38,533 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-23T17:10:38,534 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732381838486Writing region info on filesystem at 1732381838486Initializing all the Stores at 1732381838487 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732381838487Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732381838487Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381838487Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732381838487Cleaning up temporary data from old regions at 1732381838531 (+44 ms)Running coprocessor post-open hooks at 1732381838533 (+2 ms)Region opened successfully at 1732381838534 (+1 ms) 2024-11-23T17:10:38,535 INFO [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=226, masterSystemTime=1732381838447 2024-11-23T17:10:38,537 DEBUG [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=226}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-23T17:10:38,537 INFO [RS_OPEN_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_META, pid=226}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-23T17:10:38,537 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=224 updating hbase:meta row=1588230740, 
regionState=OPEN, openSeqNum=215, regionLocation=1a7c5470b472,44711,1732381534872 2024-11-23T17:10:38,538 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1a7c5470b472,44711,1732381534872, state=OPEN 2024-11-23T17:10:38,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T17:10:38,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T17:10:38,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T17:10:38,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T17:10:38,540 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=226, ppid=224, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=1a7c5470b472,44711,1732381534872 2024-11-23T17:10:38,540 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T17:10:38,540 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T17:10:38,540 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T17:10:38,540 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T17:10:38,542 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=226, resume processing ppid=224 2024-11-23T17:10:38,542 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=226, ppid=224, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=1a7c5470b472,44711,1732381534872 in 246 msec 2024-11-23T17:10:38,543 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=224, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, REOPEN/MOVE in 735 msec 2024-11-23T17:10:38,609 INFO [master/1a7c5470b472:0.Chore.1 {}] master.HMaster(2172): balance hri=5c659a536d4a93accf1b401f39f6f967, source=1a7c5470b472,37875,1732381534675, destination=1a7c5470b472,43453,1732381534825 2024-11-23T17:10:38,611 DEBUG [master/1a7c5470b472:0.Chore.1 {}] procedure2.ProcedureExecutor(1139): Stored pid=227, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=5c659a536d4a93accf1b401f39f6f967, REOPEN/MOVE 2024-11-23T17:10:38,611 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=227, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure 
table=testExportExpiredSnapshot, region=5c659a536d4a93accf1b401f39f6f967, REOPEN/MOVE 2024-11-23T17:10:38,612 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=227 updating hbase:meta row=5c659a536d4a93accf1b401f39f6f967, regionState=CLOSING, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:10:38,614 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46159 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=1a7c5470b472,37875,1732381534675, table=testExportExpiredSnapshot, region=5c659a536d4a93accf1b401f39f6f967. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-23T17:10:38,618 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37875 {}] ipc.CallRunner(138): callId: 400 service: ClientService methodName: Mutate size: 299 connection: 172.17.0.2:57863 deadline: 1732381898614, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=1a7c5470b472 port=44711 startCode=1732381534872. As of locationSeqNum=211. 2024-11-23T17:10:38,619 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1 , the old value is region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=1a7c5470b472 port=44711 startCode=1732381534872. As of locationSeqNum=211. 2024-11-23T17:10:38,619 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=1a7c5470b472 port=44711 startCode=1732381534872. As of locationSeqNum=211. 2024-11-23T17:10:38,619 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncRegionLocatorHelper(84): Try updating region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1 with the new location region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,44711,1732381534872, seqNum=211 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=1a7c5470b472 port=44711 startCode=1732381534872. As of locationSeqNum=211. 2024-11-23T17:10:38,716 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46159 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=1a7c5470b472,37875,1732381534675, table=testExportExpiredSnapshot, region=5c659a536d4a93accf1b401f39f6f967. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
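The NodeDataChanged events above fire on the /hbase/meta-region-server znode that MetaTableLocator just rewrote; each ZKWatcher and the MetaRegionLocationCache refresh from it, while the client patches its cached meta location from the RegionMovedException hint in the meantime. Below is a small illustrative sketch of watching that znode with the plain ZooKeeper client; the quorum address and path are taken from the log, the payload is an HBase-internal protobuf so only its size and version are printed, and none of this is code from the test itself.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class WatchMetaLocationSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    // Quorum address as reported by the ZKWatcher lines above.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:49631", 30000, (WatchedEvent e) -> {
      if (e.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();
    Stat stat = new Stat();
    // Same znode the watchers above receive NodeDataChanged events for.
    byte[] data = zk.getData("/hbase/meta-region-server",
        (WatchedEvent e) -> System.out.println("meta location changed: " + e.getType()), stat);
    System.out.println("meta-region-server znode: " + data.length + " bytes, version=" + stat.getVersion());
    zk.close();
  }
}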
2024-11-23T17:10:38,727 DEBUG [Async-Client-Retry-Timer-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:10:38,729 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44081, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:10:38,733 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=227, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=5c659a536d4a93accf1b401f39f6f967, REOPEN/MOVE because future has completed 2024-11-23T17:10:38,733 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:10:38,733 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=228, ppid=227, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5c659a536d4a93accf1b401f39f6f967, server=1a7c5470b472,37875,1732381534675}] 2024-11-23T17:10:38,887 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] handler.UnassignRegionHandler(122): Close 5c659a536d4a93accf1b401f39f6f967 2024-11-23T17:10:38,887 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:10:38,887 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1722): Closing 5c659a536d4a93accf1b401f39f6f967, disabling compactions & flushes 2024-11-23T17:10:38,887 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967. 2024-11-23T17:10:38,887 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967. 2024-11-23T17:10:38,887 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967. after waiting 0 ms 2024-11-23T17:10:38,887 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967. 
2024-11-23T17:10:38,891 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportExpiredSnapshot/5c659a536d4a93accf1b401f39f6f967/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-23T17:10:38,892 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:10:38,892 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967. 2024-11-23T17:10:38,892 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1676): Region close journal for 5c659a536d4a93accf1b401f39f6f967: Waiting for close lock at 1732381838887Running coprocessor pre-close hooks at 1732381838887Disabling compacts and flushes for region at 1732381838887Disabling writes for close at 1732381838887Writing region close event to WAL at 1732381838888 (+1 ms)Running coprocessor post-close hooks at 1732381838892 (+4 ms)Closed at 1732381838892 2024-11-23T17:10:38,892 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegionServer(3302): Adding 5c659a536d4a93accf1b401f39f6f967 move to 1a7c5470b472,43453,1732381534825 record at close sequenceid=5 2024-11-23T17:10:38,894 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] handler.UnassignRegionHandler(157): Closed 5c659a536d4a93accf1b401f39f6f967 2024-11-23T17:10:38,895 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=227 updating hbase:meta row=5c659a536d4a93accf1b401f39f6f967, regionState=CLOSED 2024-11-23T17:10:38,898 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=228, ppid=227, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5c659a536d4a93accf1b401f39f6f967, server=1a7c5470b472,37875,1732381534675 because future has completed 2024-11-23T17:10:38,900 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=228, resume processing ppid=227 2024-11-23T17:10:38,900 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=228, ppid=227, state=SUCCESS, hasLock=false; CloseRegionProcedure 5c659a536d4a93accf1b401f39f6f967, server=1a7c5470b472,37875,1732381534675 in 165 msec 2024-11-23T17:10:38,901 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=227, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=5c659a536d4a93accf1b401f39f6f967, REOPEN/MOVE; state=CLOSED, location=1a7c5470b472,43453,1732381534825; forceNewPlan=false, retain=false 2024-11-23T17:10:38,994 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0009_000001 (auth:SIMPLE) from 127.0.0.1:50118 2024-11-23T17:10:38,994 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0009_000001 (auth:SIMPLE) from 127.0.0.1:41458 2024-11-23T17:10:39,051 INFO [1a7c5470b472:46159 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 
1 retained the pre-restart assignment. 2024-11-23T17:10:39,052 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=227 updating hbase:meta row=5c659a536d4a93accf1b401f39f6f967, regionState=OPENING, regionLocation=1a7c5470b472,43453,1732381534825 2024-11-23T17:10:39,069 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=227, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=5c659a536d4a93accf1b401f39f6f967, REOPEN/MOVE because future has completed 2024-11-23T17:10:39,069 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=229, ppid=227, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5c659a536d4a93accf1b401f39f6f967, server=1a7c5470b472,43453,1732381534825}] 2024-11-23T17:10:39,231 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967. 2024-11-23T17:10:39,231 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(7752): Opening region: {ENCODED => 5c659a536d4a93accf1b401f39f6f967, NAME => 'testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T17:10:39,231 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967. service=AccessControlService 2024-11-23T17:10:39,231 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T17:10:39,232 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 5c659a536d4a93accf1b401f39f6f967 2024-11-23T17:10:39,232 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:10:39,232 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(7794): checking encryption for 5c659a536d4a93accf1b401f39f6f967 2024-11-23T17:10:39,232 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(7797): checking classloading for 5c659a536d4a93accf1b401f39f6f967 2024-11-23T17:10:39,239 INFO [StoreOpener-5c659a536d4a93accf1b401f39f6f967-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5c659a536d4a93accf1b401f39f6f967 2024-11-23T17:10:39,266 INFO [StoreOpener-5c659a536d4a93accf1b401f39f6f967-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5c659a536d4a93accf1b401f39f6f967 columnFamilyName cf 2024-11-23T17:10:39,267 DEBUG [StoreOpener-5c659a536d4a93accf1b401f39f6f967-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:10:39,274 DEBUG [StoreOpener-5c659a536d4a93accf1b401f39f6f967-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportExpiredSnapshot/5c659a536d4a93accf1b401f39f6f967/cf/8528eaa103da423aa13583ffe8cb96df 2024-11-23T17:10:39,274 INFO [StoreOpener-5c659a536d4a93accf1b401f39f6f967-1 {}] regionserver.HStore(327): Store=5c659a536d4a93accf1b401f39f6f967/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:10:39,275 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(1038): replaying wal for 5c659a536d4a93accf1b401f39f6f967 2024-11-23T17:10:39,275 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportExpiredSnapshot/5c659a536d4a93accf1b401f39f6f967 2024-11-23T17:10:39,276 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportExpiredSnapshot/5c659a536d4a93accf1b401f39f6f967 2024-11-23T17:10:39,277 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(1048): stopping wal replay for 5c659a536d4a93accf1b401f39f6f967 2024-11-23T17:10:39,277 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(1060): Cleaning up temporary data for 5c659a536d4a93accf1b401f39f6f967 2024-11-23T17:10:39,279 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(1093): writing seq id for 5c659a536d4a93accf1b401f39f6f967 2024-11-23T17:10:39,280 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(1114): Opened 5c659a536d4a93accf1b401f39f6f967; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70530776, jitterRate=0.05099046230316162}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:10:39,280 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5c659a536d4a93accf1b401f39f6f967 2024-11-23T17:10:39,281 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegion(1006): Region open journal for 5c659a536d4a93accf1b401f39f6f967: Running coprocessor pre-open hook at 1732381839232Writing region info on filesystem at 1732381839232Initializing all the Stores at 1732381839238 (+6 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381839238Cleaning up temporary data from old regions at 1732381839277 (+39 ms)Running coprocessor post-open hooks at 1732381839280 (+3 ms)Region opened successfully at 1732381839280 2024-11-23T17:10:39,281 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967., pid=229, masterSystemTime=1732381839228 2024-11-23T17:10:39,285 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967. 2024-11-23T17:10:39,285 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=229}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967. 
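Procedures 227 through 229 above are the balancer's REOPEN/MOVE of region 5c659a536d4a93accf1b401f39f6f967 from 1a7c5470b472,37875,1732381534675 to 1a7c5470b472,43453,1732381534825: close on the source, mark CLOSED in hbase:meta, then open on the destination. The same transition can be requested by hand through the public Admin API; the sketch below is illustrative only, with the region and destination server taken from the log and the connection setup and exact move() overload treated as assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class MoveRegionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Encoded region name and destination server as reported by the balancer lines above.
      byte[] encodedRegion = Bytes.toBytes("5c659a536d4a93accf1b401f39f6f967");
      ServerName dest = ServerName.valueOf("1a7c5470b472,43453,1732381534825");
      // Requests the same TransitRegionStateProcedure REOPEN/MOVE the master chore started.
      admin.move(encodedRegion, dest);
    }
  }
}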
2024-11-23T17:10:39,286 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=227 updating hbase:meta row=5c659a536d4a93accf1b401f39f6f967, regionState=OPEN, openSeqNum=9, regionLocation=1a7c5470b472,43453,1732381534825 2024-11-23T17:10:39,288 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=229, ppid=227, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5c659a536d4a93accf1b401f39f6f967, server=1a7c5470b472,43453,1732381534825 because future has completed 2024-11-23T17:10:39,291 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=229, resume processing ppid=227 2024-11-23T17:10:39,291 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=229, ppid=227, state=SUCCESS, hasLock=false; OpenRegionProcedure 5c659a536d4a93accf1b401f39f6f967, server=1a7c5470b472,43453,1732381534825 in 220 msec 2024-11-23T17:10:39,294 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=227, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=5c659a536d4a93accf1b401f39f6f967, REOPEN/MOVE in 682 msec 2024-11-23T17:10:39,313 DEBUG [master/1a7c5470b472:0.Chore.1 {}] master.HMaster(2203): Balancer is going into sleep until next period in 300000ms 2024-11-23T17:10:39,347 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testtb-testExportWithChecksum because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-11-23T17:10:39,347 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testExportExpiredSnapshot because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-11-23T17:10:39,376 DEBUG [master/1a7c5470b472:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-23T17:10:39,825 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0009_000001 (auth:SIMPLE) from 127.0.0.1:50124 2024-11-23T17:10:39,832 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0009_000001 (auth:SIMPLE) from 127.0.0.1:41466 2024-11-23T17:10:39,844 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:10:42,784 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732381541272_0009_01_000006 while processing FINISH_CONTAINERS event 2024-11-23T17:10:44,181 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-23T17:10:44,621 INFO [regionserver/1a7c5470b472:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce. 
because 64dcd7fd9fc755d9ffeb5ce90b3151ce/l has an old edit so flush to free WALs after random delay 123411 ms Error: java.io.IOException: Checksum mismatch between hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241123a88ef59888394c0294d0898a4610fd81_de3ad9c9965ac0ad50fdb7f1172460e5 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381827558/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241123a88ef59888394c0294d0898a4610fd81_de3ad9c9965ac0ad50fdb7f1172460e5. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-23T17:10:46,478 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_0/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000005/launch_container.sh] 2024-11-23T17:10:46,478 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_0/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000005/container_tokens] 2024-11-23T17:10:46,478 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_0/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000005/sysfs] 2024-11-23T17:10:46,872 INFO [Socket Reader #1 for port 0 {}] 
ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0009_000001 (auth:SIMPLE) from 127.0.0.1:52230 2024-11-23T17:10:47,336 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region d701f2f82212e8f3b780ec9dae20e678, had cached 0 bytes from a total of 5888 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024112360d553dbc0324de9a88be71f8726a94f_afb0f86b4bfaa9a4dd331ad4691ba018 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381827558/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024112360d553dbc0324de9a88be71f8726a94f_afb0f86b4bfaa9a4dd331ad4691ba018. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-23T17:10:47,891 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0009_000001 (auth:SIMPLE) from 127.0.0.1:52246 2024-11-23T17:10:48,680 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_1/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000004/launch_container.sh] 2024-11-23T17:10:48,680 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_1/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000004/container_tokens] 2024-11-23T17:10:48,681 WARN [ContainersLauncher #3 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_1/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000004/sysfs] 2024-11-23T17:10:48,863 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0009_000001 (auth:SIMPLE) from 127.0.0.1:45092 2024-11-23T17:10:48,921 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732381541272_0009_01_000009 while processing FINISH_CONTAINERS event 2024-11-23T17:10:48,943 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_2/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000002/launch_container.sh] 2024-11-23T17:10:48,943 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_2/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000002/container_tokens] 2024-11-23T17:10:48,943 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_2/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000002/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/afb0f86b4bfaa9a4dd331ad4691ba018/cf/30c5874a0c0a443fa9ed199e91c5784c and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381827558/archive/data/default/testtb-testExportWithChecksum/afb0f86b4bfaa9a4dd331ad4691ba018/cf/30c5874a0c0a443fa9ed199e91c5784c. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/de3ad9c9965ac0ad50fdb7f1172460e5/cf/01fce57c971b44ae8ef5b3e395999fde and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381827558/archive/data/default/testtb-testExportWithChecksum/de3ad9c9965ac0ad50fdb7f1172460e5/cf/01fce57c971b44ae8ef5b3e395999fde. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-23T17:10:49,882 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0009_000001 (auth:SIMPLE) from 127.0.0.1:45102 2024-11-23T17:10:51,055 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_3/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000003/launch_container.sh] 2024-11-23T17:10:51,055 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_3/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000003/container_tokens] 2024-11-23T17:10:51,055 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_3/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000003/sysfs] 2024-11-23T17:10:51,927 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732381541272_0009_01_000012 while processing FINISH_CONTAINERS event 2024-11-23T17:10:53,389 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_1/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000007/launch_container.sh] 2024-11-23T17:10:53,389 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_1/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000007/container_tokens] 2024-11-23T17:10:53,389 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned 
false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_1/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000007/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241123a88ef59888394c0294d0898a4610fd81_de3ad9c9965ac0ad50fdb7f1172460e5 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381827558/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241123a88ef59888394c0294d0898a4610fd81_de3ad9c9965ac0ad50fdb7f1172460e5. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-23T17:10:53,598 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_1/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000008/launch_container.sh] 2024-11-23T17:10:53,598 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_1/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000008/container_tokens] 2024-11-23T17:10:53,598 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_1/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000008/sysfs] 2024-11-23T17:10:54,284 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_1/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000010/launch_container.sh] 2024-11-23T17:10:54,284 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_1/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000010/container_tokens] 2024-11-23T17:10:54,284 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_1/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000010/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024112360d553dbc0324de9a88be71f8726a94f_afb0f86b4bfaa9a4dd331ad4691ba018 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381827558/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024112360d553dbc0324de9a88be71f8726a94f_afb0f86b4bfaa9a4dd331ad4691ba018. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/afb0f86b4bfaa9a4dd331ad4691ba018/cf/30c5874a0c0a443fa9ed199e91c5784c and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381827558/archive/data/default/testtb-testExportWithChecksum/afb0f86b4bfaa9a4dd331ad4691ba018/cf/30c5874a0c0a443fa9ed199e91c5784c. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-23T17:10:54,886 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0009_000001 (auth:SIMPLE) from 127.0.0.1:43056 2024-11-23T17:10:54,886 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0009_000001 (auth:SIMPLE) from 127.0.0.1:43058 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/de3ad9c9965ac0ad50fdb7f1172460e5/cf/01fce57c971b44ae8ef5b3e395999fde and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381827558/archive/data/default/testtb-testExportWithChecksum/de3ad9c9965ac0ad50fdb7f1172460e5/cf/01fce57c971b44ae8ef5b3e395999fde. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-23T17:10:55,518 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_3/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000011/launch_container.sh] 2024-11-23T17:10:55,518 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_3/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000011/container_tokens] 2024-11-23T17:10:55,518 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_3/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000011/sysfs] 2024-11-23T17:10:55,891 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0009_000001 (auth:SIMPLE) from 127.0.0.1:43062 2024-11-23T17:10:55,894 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0009_000001 (auth:SIMPLE) from 127.0.0.1:43072 2024-11-23T17:10:57,542 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732381541272_0009_01_000017 while processing FINISH_CONTAINERS event 2024-11-23T17:10:58,543 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732381541272_0009_01_000018 while processing FINISH_CONTAINERS event 2024-11-23T17:10:59,909 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_2/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000013/launch_container.sh] 2024-11-23T17:10:59,909 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_2/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000013/container_tokens] 2024-11-23T17:10:59,909 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_2/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000013/sysfs] 2024-11-23T17:10:59,911 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_0/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000014/launch_container.sh] 2024-11-23T17:10:59,911 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_0/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000014/container_tokens] 2024-11-23T17:10:59,911 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_0/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000014/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024112360d553dbc0324de9a88be71f8726a94f_afb0f86b4bfaa9a4dd331ad4691ba018 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381827558/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024112360d553dbc0324de9a88be71f8726a94f_afb0f86b4bfaa9a4dd331ad4691ba018. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241123a88ef59888394c0294d0898a4610fd81_de3ad9c9965ac0ad50fdb7f1172460e5 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381827558/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241123a88ef59888394c0294d0898a4610fd81_de3ad9c9965ac0ad50fdb7f1172460e5. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-23T17:11:00,916 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0009_000001 (auth:SIMPLE) from 127.0.0.1:43064 2024-11-23T17:11:00,917 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0009_000001 (auth:SIMPLE) from 127.0.0.1:57848 2024-11-23T17:11:02,088 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_2/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000015/launch_container.sh] 2024-11-23T17:11:02,088 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_2/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000015/container_tokens] 2024-11-23T17:11:02,088 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_2/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000015/sysfs] 2024-11-23T17:11:02,143 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_3/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000016/launch_container.sh] 2024-11-23T17:11:02,143 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_3/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000016/container_tokens] 2024-11-23T17:11:02,143 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_3/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000016/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/afb0f86b4bfaa9a4dd331ad4691ba018/cf/30c5874a0c0a443fa9ed199e91c5784c and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381827558/archive/data/default/testtb-testExportWithChecksum/afb0f86b4bfaa9a4dd331ad4691ba018/cf/30c5874a0c0a443fa9ed199e91c5784c. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/de3ad9c9965ac0ad50fdb7f1172460e5/cf/01fce57c971b44ae8ef5b3e395999fde and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/local-export-1732381827558/archive/data/default/testtb-testExportWithChecksum/de3ad9c9965ac0ad50fdb7f1172460e5/cf/01fce57c971b44ae8ef5b3e395999fde. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-23T17:11:03,179 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T17:11:03,926 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0009_000001 (auth:SIMPLE) from 127.0.0.1:43080 2024-11-23T17:11:03,931 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0009_000001 (auth:SIMPLE) from 127.0.0.1:57856 2024-11-23T17:11:06,884 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0009_000001 (auth:SIMPLE) from 127.0.0.1:43088 2024-11-23T17:11:06,885 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0009_000001 (auth:SIMPLE) from 127.0.0.1:43084 2024-11-23T17:11:06,887 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0009_000001 (auth:SIMPLE) from 127.0.0.1:57872 2024-11-23T17:11:07,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742374_1550 (size=51340) 2024-11-23T17:11:07,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742374_1550 (size=51340) 2024-11-23T17:11:07,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742374_1550 (size=51340) 2024-11-23T17:11:07,062 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1732381541272_0009_01_000020 is : 143 2024-11-23T17:11:07,075 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1732381541272_0009_01_000021 is : 143 2024-11-23T17:11:07,087 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1732381541272_0009_01_000022 is : 143 2024-11-23T17:11:07,106 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_1/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000020/launch_container.sh] 2024-11-23T17:11:07,106 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_1/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000020/container_tokens] 2024-11-23T17:11:07,106 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_1/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000020/sysfs] 2024-11-23T17:11:07,108 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_3/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000021/launch_container.sh] 2024-11-23T17:11:07,108 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_3/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000021/container_tokens] 2024-11-23T17:11:07,108 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_3/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000021/sysfs] 2024-11-23T17:11:07,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742375_1551 (size=460) 2024-11-23T17:11:07,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742375_1551 (size=460) 2024-11-23T17:11:07,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742375_1551 (size=460) 2024-11-23T17:11:07,129 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_2/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000022/launch_container.sh] 2024-11-23T17:11:07,130 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_2/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000022/container_tokens] 2024-11-23T17:11:07,130 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_2/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000022/sysfs] 2024-11-23T17:11:07,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742376_1552 (size=51340) 2024-11-23T17:11:07,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742376_1552 (size=51340) 2024-11-23T17:11:07,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742376_1552 (size=51340) 2024-11-23T17:11:07,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742377_1553 (size=349821) 2024-11-23T17:11:07,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742377_1553 (size=349821) 2024-11-23T17:11:07,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742377_1553 (size=349821) 2024-11-23T17:11:07,196 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0009_000001 (auth:SIMPLE) from 127.0.0.1:57876 2024-11-23T17:11:08,501 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1239): Snapshot export failed org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1732381541272_0009_m_000003 Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0 at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:947) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1216) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:400) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:285) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
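The repeated checksum-mismatch errors above name two workarounds: file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC, or skipping verification with -no-checksum-verify. As a rough, illustrative sketch only (the snapshot name is the one from this test run, but the target URI is a placeholder and the exact option spellings should be confirmed against the ExportSnapshot usage text for the HBase version in use), a driver re-running the export with those options might look like:

    // Illustrative sketch: re-run the snapshot export with the options
    // mentioned in the checksum-mismatch errors. Target URI is a placeholder.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotRetry {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // File-level checksum validation across different filesystems/block sizes,
        // as suggested by the error text (equivalent to passing -D on the command line).
        conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportWithChecksum", // snapshot name from this run
            "-copy-to", "file:///tmp/local-export",        // placeholder destination URI
            // Alternatively, skip verification entirely (risks masking corruption,
            // as the error text itself warns):
            // "-no-checksum-verify",
        });
        System.exit(rc);
      }
    }

Skipping verification trades safety for filesystem compatibility; the COMPOSITE_CRC mode keeps a file-level check even when block sizes or filesystem types differ.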
2024-11-23T17:11:08,502 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381868502 2024-11-23T17:11:08,502 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:35431, tgtDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381868502, rawTgtDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381868502, srcFsUri=hdfs://localhost:35431, srcDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:11:08,530 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:35431, inputRoot=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:11:08,530 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1293158403_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381868502, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381868502/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-23T17:11:08,531 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-23T17:11:08,535 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381868502/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-23T17:11:08,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742378_1554 (size=951) 2024-11-23T17:11:08,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742378_1554 (size=951) 2024-11-23T17:11:08,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742378_1554 (size=951) 2024-11-23T17:11:08,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742379_1555 (size=156) 2024-11-23T17:11:08,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742379_1555 (size=156) 2024-11-23T17:11:08,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742379_1555 (size=156) 2024-11-23T17:11:08,550 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:11:08,550 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:11:08,550 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:11:09,599 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop-15810158999494519463.jar 2024-11-23T17:11:09,600 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:11:09,600 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:11:09,672 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop-16279476905997704442.jar 2024-11-23T17:11:09,673 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:11:09,673 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:11:09,673 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:11:09,673 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:11:09,673 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:11:09,674 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:11:09,674 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-23T17:11:09,674 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-23T17:11:09,675 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-23T17:11:09,675 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-23T17:11:09,675 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-23T17:11:09,676 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-23T17:11:09,676 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-23T17:11:09,676 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-23T17:11:09,677 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-23T17:11:09,677 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-23T17:11:09,677 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-23T17:11:09,678 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:11:09,678 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:11:09,678 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:11:09,678 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:11:09,679 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:11:09,679 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:11:09,679 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:11:09,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742380_1556 (size=24020) 2024-11-23T17:11:09,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742380_1556 (size=24020) 2024-11-23T17:11:09,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742380_1556 (size=24020) 2024-11-23T17:11:09,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742381_1557 (size=77755) 2024-11-23T17:11:09,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742381_1557 (size=77755) 2024-11-23T17:11:09,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742381_1557 (size=77755) 2024-11-23T17:11:09,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742382_1558 (size=440956) 2024-11-23T17:11:09,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742382_1558 (size=440956) 2024-11-23T17:11:09,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is 
added to blk_1073742382_1558 (size=440956) 2024-11-23T17:11:09,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742383_1559 (size=131360) 2024-11-23T17:11:09,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742383_1559 (size=131360) 2024-11-23T17:11:09,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742383_1559 (size=131360) 2024-11-23T17:11:10,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742384_1560 (size=111793) 2024-11-23T17:11:10,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742384_1560 (size=111793) 2024-11-23T17:11:10,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742384_1560 (size=111793) 2024-11-23T17:11:10,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742385_1561 (size=1832290) 2024-11-23T17:11:10,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742385_1561 (size=1832290) 2024-11-23T17:11:10,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742385_1561 (size=1832290) 2024-11-23T17:11:10,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742386_1562 (size=8360005) 2024-11-23T17:11:10,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742386_1562 (size=8360005) 2024-11-23T17:11:10,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742386_1562 (size=8360005) 2024-11-23T17:11:10,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742387_1563 (size=503880) 2024-11-23T17:11:10,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742387_1563 (size=503880) 2024-11-23T17:11:10,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742387_1563 (size=503880) 2024-11-23T17:11:10,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742388_1564 (size=322274) 2024-11-23T17:11:10,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742388_1564 (size=322274) 2024-11-23T17:11:10,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742388_1564 (size=322274) 2024-11-23T17:11:10,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742389_1565 (size=20406) 2024-11-23T17:11:10,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:32895 is added to blk_1073742389_1565 (size=20406) 2024-11-23T17:11:10,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742389_1565 (size=20406) 2024-11-23T17:11:10,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742390_1566 (size=45609) 2024-11-23T17:11:10,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742390_1566 (size=45609) 2024-11-23T17:11:10,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742390_1566 (size=45609) 2024-11-23T17:11:10,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742391_1567 (size=136454) 2024-11-23T17:11:10,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742391_1567 (size=136454) 2024-11-23T17:11:10,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742391_1567 (size=136454) 2024-11-23T17:11:10,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742392_1568 (size=1597270) 2024-11-23T17:11:10,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742392_1568 (size=1597270) 2024-11-23T17:11:10,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742392_1568 (size=1597270) 2024-11-23T17:11:10,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742393_1569 (size=30873) 2024-11-23T17:11:10,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742393_1569 (size=30873) 2024-11-23T17:11:10,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742393_1569 (size=30873) 2024-11-23T17:11:10,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742394_1570 (size=29229) 2024-11-23T17:11:10,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742394_1570 (size=29229) 2024-11-23T17:11:10,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742394_1570 (size=29229) 2024-11-23T17:11:10,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742395_1571 (size=903661) 2024-11-23T17:11:10,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742395_1571 (size=903661) 2024-11-23T17:11:10,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742395_1571 (size=903661) 2024-11-23T17:11:10,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:32895 is added to blk_1073742396_1572 (size=6424742) 2024-11-23T17:11:10,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742396_1572 (size=6424742) 2024-11-23T17:11:10,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742396_1572 (size=6424742) 2024-11-23T17:11:10,737 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region de3ad9c9965ac0ad50fdb7f1172460e5, had cached 0 bytes from a total of 14249 2024-11-23T17:11:10,737 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region afb0f86b4bfaa9a4dd331ad4691ba018, had cached 0 bytes from a total of 6510 2024-11-23T17:11:10,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742397_1573 (size=5175431) 2024-11-23T17:11:10,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742397_1573 (size=5175431) 2024-11-23T17:11:10,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742397_1573 (size=5175431) 2024-11-23T17:11:10,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742398_1574 (size=232881) 2024-11-23T17:11:10,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742398_1574 (size=232881) 2024-11-23T17:11:10,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742398_1574 (size=232881) 2024-11-23T17:11:10,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742399_1575 (size=1323991) 2024-11-23T17:11:10,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742399_1575 (size=1323991) 2024-11-23T17:11:10,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742399_1575 (size=1323991) 2024-11-23T17:11:10,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742400_1576 (size=4695811) 2024-11-23T17:11:10,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742400_1576 (size=4695811) 2024-11-23T17:11:10,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742400_1576 (size=4695811) 2024-11-23T17:11:10,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742401_1577 (size=1877034) 2024-11-23T17:11:10,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742401_1577 (size=1877034) 2024-11-23T17:11:10,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742401_1577 (size=1877034) 2024-11-23T17:11:10,985 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742402_1578 (size=217555) 2024-11-23T17:11:10,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742402_1578 (size=217555) 2024-11-23T17:11:10,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742402_1578 (size=217555) 2024-11-23T17:11:11,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742403_1579 (size=4188619) 2024-11-23T17:11:11,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742403_1579 (size=4188619) 2024-11-23T17:11:11,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742403_1579 (size=4188619) 2024-11-23T17:11:11,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742404_1580 (size=127628) 2024-11-23T17:11:11,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742404_1580 (size=127628) 2024-11-23T17:11:11,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742404_1580 (size=127628) 2024-11-23T17:11:11,074 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-23T17:11:11,076 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-11-23T17:11:11,078 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=13.9 K 2024-11-23T17:11:11,078 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.8 K 2024-11-23T17:11:11,079 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=6.4 K 2024-11-23T17:11:11,079 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.2 K 2024-11-23T17:11:11,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742405_1581 (size=1023) 2024-11-23T17:11:11,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742405_1581 (size=1023) 2024-11-23T17:11:11,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742405_1581 (size=1023) 2024-11-23T17:11:11,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742406_1582 (size=35) 2024-11-23T17:11:11,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742406_1582 (size=35) 2024-11-23T17:11:11,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742406_1582 (size=35) 2024-11-23T17:11:11,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742407_1583 (size=304077) 
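The JobResourceUploader warning above ("No job jar file set. User classes may not be found.") points at the standard MapReduce Job API. A minimal sketch of how a job jar is normally attached, with the driver class and jar path as placeholders rather than values from this run:

    // Minimal sketch of the Job API referenced by the JobResourceUploader warning.
    // The driver class and jar path are placeholders, not values from this log.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    public class JobJarExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "example-job");
        // Either locate the jar containing the driver class...
        job.setJarByClass(JobJarExample.class);
        // ...or point at an explicit jar path, as Job#setJar(String) allows:
        // job.setJar("/path/to/job.jar");
        System.out.println("Job jar: " + job.getJar());
      }
    }

In this test log the warning is expected, since the mini-cluster job is assembled from classpath directories rather than a packaged jar.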
2024-11-23T17:11:11,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742407_1583 (size=304077) 2024-11-23T17:11:11,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742407_1583 (size=304077) 2024-11-23T17:11:12,171 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_3/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000019/launch_container.sh] 2024-11-23T17:11:12,171 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_3/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000019/container_tokens] 2024-11-23T17:11:12,171 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_3/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000019/sysfs] 2024-11-23T17:11:13,277 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T17:11:13,278 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-23T17:11:13,280 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0009_000001 (auth:SIMPLE) from 127.0.0.1:46032 2024-11-23T17:11:13,296 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_0/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000001/launch_container.sh] 2024-11-23T17:11:13,296 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_0/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000001/container_tokens] 2024-11-23T17:11:13,297 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_0/usercache/jenkins/appcache/application_1732381541272_0009/container_1732381541272_0009_01_000001/sysfs] 2024-11-23T17:11:14,084 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0010_000001 (auth:SIMPLE) from 127.0.0.1:43602 2024-11-23T17:11:18,803 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0010_000001 (auth:SIMPLE) from 127.0.0.1:60030 2024-11-23T17:11:19,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742408_1584 (size=349775) 2024-11-23T17:11:19,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742408_1584 (size=349775) 2024-11-23T17:11:19,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742408_1584 (size=349775) 2024-11-23T17:11:21,051 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0010_000001 (auth:SIMPLE) from 127.0.0.1:37348 2024-11-23T17:11:21,051 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0010_000001 (auth:SIMPLE) from 127.0.0.1:33250 2024-11-23T17:11:21,923 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0010_000001 (auth:SIMPLE) from 127.0.0.1:37364 2024-11-23T17:11:21,930 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0010_000001 (auth:SIMPLE) from 127.0.0.1:33254 2024-11-23T17:11:23,486 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 42720 2024-11-23T17:11:24,232 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 5c659a536d4a93accf1b401f39f6f967, had cached 0 bytes from a total of 14663 
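The AbstractLeafQueue warnings above mean that yarn.scheduler.capacity.maximum-am-resource-percent is set so low that not even one ApplicationMaster fits within the queue's AM share; the capacity scheduler logs the warning and lets a single application start anyway. On a deliberately small MiniMRCluster this is expected. Where it shows up on a real cluster the property is usually raised in capacity-scheduler.xml; a hedged sketch of doing the same on a test Configuration (the 0.5 value is only an example):

import org.apache.hadoop.conf.Configuration;

public class AmResourcePercentExample {
    public static Configuration withLargerAmShare(Configuration base) {
        Configuration conf = new Configuration(base);
        // Allow ApplicationMasters to use up to 50% of a queue's resources.
        // In production this normally lives in capacity-scheduler.xml rather
        // than being set programmatically.
        conf.setFloat("yarn.scheduler.capacity.maximum-am-resource-percent", 0.5f);
        return conf;
    }
}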
2024-11-23T17:11:24,282 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732381541272_0010_01_000006 while processing FINISH_CONTAINERS event 2024-11-23T17:11:26,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742409_1585 (size=14249) 2024-11-23T17:11:26,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742409_1585 (size=14249) 2024-11-23T17:11:26,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742409_1585 (size=14249) 2024-11-23T17:11:27,139 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_1/usercache/jenkins/appcache/application_1732381541272_0010/container_1732381541272_0010_01_000002/launch_container.sh] 2024-11-23T17:11:27,139 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_1/usercache/jenkins/appcache/application_1732381541272_0010/container_1732381541272_0010_01_000002/container_tokens] 2024-11-23T17:11:27,139 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_1/usercache/jenkins/appcache/application_1732381541272_0010/container_1732381541272_0010_01_000002/sysfs] 2024-11-23T17:11:29,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742411_1587 (size=7962) 2024-11-23T17:11:29,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742411_1587 (size=7962) 2024-11-23T17:11:29,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742411_1587 (size=7962) 2024-11-23T17:11:29,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742412_1588 (size=5311) 2024-11-23T17:11:29,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742412_1588 (size=5311) 2024-11-23T17:11:29,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742412_1588 (size=5311) 2024-11-23T17:11:29,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742413_1589 (size=6510) 2024-11-23T17:11:29,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742413_1589 (size=6510) 2024-11-23T17:11:29,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to 
blk_1073742413_1589 (size=6510) 2024-11-23T17:11:29,654 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_3/usercache/jenkins/appcache/application_1732381541272_0010/container_1732381541272_0010_01_000003/launch_container.sh] 2024-11-23T17:11:29,654 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_3/usercache/jenkins/appcache/application_1732381541272_0010/container_1732381541272_0010_01_000003/container_tokens] 2024-11-23T17:11:29,654 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_3/usercache/jenkins/appcache/application_1732381541272_0010/container_1732381541272_0010_01_000003/sysfs] 2024-11-23T17:11:29,697 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_2/usercache/jenkins/appcache/application_1732381541272_0010/container_1732381541272_0010_01_000005/launch_container.sh] 2024-11-23T17:11:29,697 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_2/usercache/jenkins/appcache/application_1732381541272_0010/container_1732381541272_0010_01_000005/container_tokens] 2024-11-23T17:11:29,697 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_2/usercache/jenkins/appcache/application_1732381541272_0010/container_1732381541272_0010_01_000005/sysfs] 2024-11-23T17:11:29,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742410_1586 (size=31734) 2024-11-23T17:11:29,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742410_1586 (size=31734) 2024-11-23T17:11:29,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742410_1586 (size=31734) 2024-11-23T17:11:29,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742414_1590 (size=463) 2024-11-23T17:11:29,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742414_1590 (size=463) 2024-11-23T17:11:29,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38951 is added to blk_1073742414_1590 (size=463) 2024-11-23T17:11:29,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742415_1591 (size=31734) 2024-11-23T17:11:29,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742415_1591 (size=31734) 2024-11-23T17:11:29,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742415_1591 (size=31734) 2024-11-23T17:11:29,780 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_0/usercache/jenkins/appcache/application_1732381541272_0010/container_1732381541272_0010_01_000004/launch_container.sh] 2024-11-23T17:11:29,780 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_0/usercache/jenkins/appcache/application_1732381541272_0010/container_1732381541272_0010_01_000004/container_tokens] 2024-11-23T17:11:29,780 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_0/usercache/jenkins/appcache/application_1732381541272_0010/container_1732381541272_0010_01_000004/sysfs] 2024-11-23T17:11:29,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742416_1592 (size=349775) 2024-11-23T17:11:29,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742416_1592 (size=349775) 2024-11-23T17:11:29,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742416_1592 (size=349775) 2024-11-23T17:11:29,800 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0010_000001 (auth:SIMPLE) from 127.0.0.1:37368 2024-11-23T17:11:29,807 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0010_000001 (auth:SIMPLE) from 127.0.0.1:37380 2024-11-23T17:11:29,812 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0010_000001 (auth:SIMPLE) from 127.0.0.1:33264 2024-11-23T17:11:31,338 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-23T17:11:31,339 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
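The entries from 17:11:11 (hfile list loaded, four export splits) through 17:11:31 ("Finalize the Snapshot Export", then verification and the completion message that follows) trace the normal lifecycle of the ExportSnapshot MapReduce job copying snapshot files to the destination filesystem. Outside the test harness the same tool is typically driven through ToolRunner or the hbase command line; a rough sketch, where the snapshot name and destination come from this log but everything else is illustrative, and the option names should be checked against the ExportSnapshot usage text for your HBase version:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotExample {
    public static void main(String[] args) throws Exception {
        // Roughly equivalent to:
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //     -snapshot snaptb0-testExportWithChecksum -copy-to <dest> -overwrite
        int rc = ToolRunner.run(HBaseConfiguration.create(), new ExportSnapshot(),
            new String[] {
                "-snapshot", "snaptb0-testExportWithChecksum",
                "-copy-to",
                "hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381868502",
                "-overwrite"
            });
        System.exit(rc);
    }
}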
2024-11-23T17:11:31,345 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportWithChecksum 2024-11-23T17:11:31,345 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-23T17:11:31,346 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-23T17:11:31,346 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1293158403_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-23T17:11:31,346 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-11-23T17:11:31,346 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-11-23T17:11:31,346 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1293158403_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381868502/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381868502/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-23T17:11:31,347 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381868502/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-11-23T17:11:31,347 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381868502/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-11-23T17:11:31,353 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithChecksum 2024-11-23T17:11:31,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=230, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum 2024-11-23T17:11:31,357 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381891357"}]},"ts":"1732381891357"} 2024-11-23T17:11:31,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=230 2024-11-23T17:11:31,359 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-11-23T17:11:31,359 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithChecksum to state=DISABLING 2024-11-23T17:11:31,359 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=231, ppid=230, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-11-23T17:11:31,361 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=afb0f86b4bfaa9a4dd331ad4691ba018, UNASSIGN}, {pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=de3ad9c9965ac0ad50fdb7f1172460e5, UNASSIGN}] 2024-11-23T17:11:31,362 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=afb0f86b4bfaa9a4dd331ad4691ba018, UNASSIGN 2024-11-23T17:11:31,362 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=de3ad9c9965ac0ad50fdb7f1172460e5, UNASSIGN 2024-11-23T17:11:31,363 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=232 updating hbase:meta row=afb0f86b4bfaa9a4dd331ad4691ba018, regionState=CLOSING, regionLocation=1a7c5470b472,37875,1732381534675 2024-11-23T17:11:31,363 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=233 updating hbase:meta row=de3ad9c9965ac0ad50fdb7f1172460e5, regionState=CLOSING, regionLocation=1a7c5470b472,43453,1732381534825 2024-11-23T17:11:31,365 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46159 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=1a7c5470b472,43453,1732381534825, table=testtb-testExportWithChecksum, region=de3ad9c9965ac0ad50fdb7f1172460e5. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-23T17:11:31,369 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46159 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=1a7c5470b472,37875,1732381534675, table=testtb-testExportWithChecksum, region=afb0f86b4bfaa9a4dd331ad4691ba018. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-11-23T17:11:31,370 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=afb0f86b4bfaa9a4dd331ad4691ba018, UNASSIGN because future has completed 2024-11-23T17:11:31,370 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:11:31,370 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=234, ppid=232, state=RUNNABLE, hasLock=false; CloseRegionProcedure afb0f86b4bfaa9a4dd331ad4691ba018, server=1a7c5470b472,37875,1732381534675}] 2024-11-23T17:11:31,372 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=de3ad9c9965ac0ad50fdb7f1172460e5, UNASSIGN because future has completed 2024-11-23T17:11:31,372 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:11:31,372 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=235, ppid=233, state=RUNNABLE, hasLock=false; CloseRegionProcedure de3ad9c9965ac0ad50fdb7f1172460e5, server=1a7c5470b472,43453,1732381534825}] 2024-11-23T17:11:31,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=230 2024-11-23T17:11:31,523 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] handler.UnassignRegionHandler(122): Close afb0f86b4bfaa9a4dd331ad4691ba018 2024-11-23T17:11:31,524 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:11:31,524 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1722): Closing afb0f86b4bfaa9a4dd331ad4691ba018, disabling compactions & flushes 2024-11-23T17:11:31,524 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1732381825401.afb0f86b4bfaa9a4dd331ad4691ba018. 2024-11-23T17:11:31,524 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1732381825401.afb0f86b4bfaa9a4dd331ad4691ba018. 2024-11-23T17:11:31,524 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1732381825401.afb0f86b4bfaa9a4dd331ad4691ba018. after waiting 0 ms 2024-11-23T17:11:31,524 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1732381825401.afb0f86b4bfaa9a4dd331ad4691ba018. 
2024-11-23T17:11:31,525 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] handler.UnassignRegionHandler(122): Close de3ad9c9965ac0ad50fdb7f1172460e5 2024-11-23T17:11:31,525 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:11:31,525 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1722): Closing de3ad9c9965ac0ad50fdb7f1172460e5, disabling compactions & flushes 2024-11-23T17:11:31,525 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1732381825401.de3ad9c9965ac0ad50fdb7f1172460e5. 2024-11-23T17:11:31,525 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1732381825401.de3ad9c9965ac0ad50fdb7f1172460e5. 2024-11-23T17:11:31,525 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1732381825401.de3ad9c9965ac0ad50fdb7f1172460e5. after waiting 0 ms 2024-11-23T17:11:31,525 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1732381825401.de3ad9c9965ac0ad50fdb7f1172460e5. 2024-11-23T17:11:31,529 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/afb0f86b4bfaa9a4dd331ad4691ba018/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:11:31,529 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/de3ad9c9965ac0ad50fdb7f1172460e5/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:11:31,530 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:11:31,530 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:11:31,530 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1732381825401.afb0f86b4bfaa9a4dd331ad4691ba018. 2024-11-23T17:11:31,530 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1732381825401.de3ad9c9965ac0ad50fdb7f1172460e5. 
2024-11-23T17:11:31,530 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1676): Region close journal for afb0f86b4bfaa9a4dd331ad4691ba018: Waiting for close lock at 1732381891524Running coprocessor pre-close hooks at 1732381891524Disabling compacts and flushes for region at 1732381891524Disabling writes for close at 1732381891524Writing region close event to WAL at 1732381891525 (+1 ms)Running coprocessor post-close hooks at 1732381891530 (+5 ms)Closed at 1732381891530 2024-11-23T17:11:31,530 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1676): Region close journal for de3ad9c9965ac0ad50fdb7f1172460e5: Waiting for close lock at 1732381891525Running coprocessor pre-close hooks at 1732381891525Disabling compacts and flushes for region at 1732381891525Disabling writes for close at 1732381891525Writing region close event to WAL at 1732381891526 (+1 ms)Running coprocessor post-close hooks at 1732381891530 (+4 ms)Closed at 1732381891530 2024-11-23T17:11:31,533 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] handler.UnassignRegionHandler(157): Closed de3ad9c9965ac0ad50fdb7f1172460e5 2024-11-23T17:11:31,533 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=233 updating hbase:meta row=de3ad9c9965ac0ad50fdb7f1172460e5, regionState=CLOSED 2024-11-23T17:11:31,533 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] handler.UnassignRegionHandler(157): Closed afb0f86b4bfaa9a4dd331ad4691ba018 2024-11-23T17:11:31,535 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=232 updating hbase:meta row=afb0f86b4bfaa9a4dd331ad4691ba018, regionState=CLOSED 2024-11-23T17:11:31,537 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=235, ppid=233, state=RUNNABLE, hasLock=false; CloseRegionProcedure de3ad9c9965ac0ad50fdb7f1172460e5, server=1a7c5470b472,43453,1732381534825 because future has completed 2024-11-23T17:11:31,538 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=234, ppid=232, state=RUNNABLE, hasLock=false; CloseRegionProcedure afb0f86b4bfaa9a4dd331ad4691ba018, server=1a7c5470b472,37875,1732381534675 because future has completed 2024-11-23T17:11:31,540 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=235, resume processing ppid=233 2024-11-23T17:11:31,540 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=235, ppid=233, state=SUCCESS, hasLock=false; CloseRegionProcedure de3ad9c9965ac0ad50fdb7f1172460e5, server=1a7c5470b472,43453,1732381534825 in 166 msec 2024-11-23T17:11:31,541 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=234, resume processing ppid=232 2024-11-23T17:11:31,541 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=234, ppid=232, state=SUCCESS, hasLock=false; CloseRegionProcedure afb0f86b4bfaa9a4dd331ad4691ba018, server=1a7c5470b472,37875,1732381534675 in 168 msec 2024-11-23T17:11:31,542 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=233, ppid=231, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=de3ad9c9965ac0ad50fdb7f1172460e5, UNASSIGN in 179 msec 2024-11-23T17:11:31,544 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): 
Finished subprocedure pid=232, resume processing ppid=231 2024-11-23T17:11:31,544 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=232, ppid=231, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=afb0f86b4bfaa9a4dd331ad4691ba018, UNASSIGN in 180 msec 2024-11-23T17:11:31,547 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=231, resume processing ppid=230 2024-11-23T17:11:31,547 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=231, ppid=230, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 186 msec 2024-11-23T17:11:31,549 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381891548"}]},"ts":"1732381891548"} 2024-11-23T17:11:31,550 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-11-23T17:11:31,550 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED 2024-11-23T17:11:31,552 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=230, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum in 198 msec 2024-11-23T17:11:31,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=230 2024-11-23T17:11:31,678 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-23T17:11:31,679 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithChecksum 2024-11-23T17:11:31,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=236, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-23T17:11:31,681 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=236, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-23T17:11:31,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithChecksum 2024-11-23T17:11:31,682 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=236, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-23T17:11:31,686 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37875 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 2024-11-23T17:11:31,689 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/afb0f86b4bfaa9a4dd331ad4691ba018 2024-11-23T17:11:31,690 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/de3ad9c9965ac0ad50fdb7f1172460e5 2024-11-23T17:11:31,692 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/de3ad9c9965ac0ad50fdb7f1172460e5/cf, FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/de3ad9c9965ac0ad50fdb7f1172460e5/recovered.edits] 2024-11-23T17:11:31,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-23T17:11:31,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-23T17:11:31,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-23T17:11:31,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-23T17:11:31,694 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-23T17:11:31,694 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-23T17:11:31,694 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-23T17:11:31,694 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-23T17:11:31,694 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/afb0f86b4bfaa9a4dd331ad4691ba018/cf, FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/afb0f86b4bfaa9a4dd331ad4691ba018/recovered.edits] 2024-11-23T17:11:31,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-23T17:11:31,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-23T17:11:31,696 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:11:31,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:11:31,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-23T17:11:31,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:11:31,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-23T17:11:31,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:11:31,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=236 2024-11-23T17:11:31,699 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:11:31,700 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:11:31,700 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:11:31,701 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:11:31,707 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/afb0f86b4bfaa9a4dd331ad4691ba018/cf/30c5874a0c0a443fa9ed199e91c5784c to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportWithChecksum/afb0f86b4bfaa9a4dd331ad4691ba018/cf/30c5874a0c0a443fa9ed199e91c5784c 2024-11-23T17:11:31,707 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/de3ad9c9965ac0ad50fdb7f1172460e5/cf/01fce57c971b44ae8ef5b3e395999fde to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportWithChecksum/de3ad9c9965ac0ad50fdb7f1172460e5/cf/01fce57c971b44ae8ef5b3e395999fde 2024-11-23T17:11:31,710 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/afb0f86b4bfaa9a4dd331ad4691ba018/recovered.edits/9.seqid to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportWithChecksum/afb0f86b4bfaa9a4dd331ad4691ba018/recovered.edits/9.seqid 2024-11-23T17:11:31,710 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/de3ad9c9965ac0ad50fdb7f1172460e5/recovered.edits/9.seqid to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportWithChecksum/de3ad9c9965ac0ad50fdb7f1172460e5/recovered.edits/9.seqid 2024-11-23T17:11:31,711 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/afb0f86b4bfaa9a4dd331ad4691ba018 2024-11-23T17:11:31,711 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportWithChecksum/de3ad9c9965ac0ad50fdb7f1172460e5 2024-11-23T17:11:31,711 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions 2024-11-23T17:11:31,711 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6 2024-11-23T17:11:31,712 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf] 2024-11-23T17:11:31,716 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241123a88ef59888394c0294d0898a4610fd81_de3ad9c9965ac0ad50fdb7f1172460e5 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241123a88ef59888394c0294d0898a4610fd81_de3ad9c9965ac0ad50fdb7f1172460e5 2024-11-23T17:11:31,718 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024112360d553dbc0324de9a88be71f8726a94f_afb0f86b4bfaa9a4dd331ad4691ba018 to 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024112360d553dbc0324de9a88be71f8726a94f_afb0f86b4bfaa9a4dd331ad4691ba018 2024-11-23T17:11:31,718 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6 2024-11-23T17:11:31,721 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=236, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-23T17:11:31,729 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-11-23T17:11:31,732 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithChecksum' descriptor. 2024-11-23T17:11:31,733 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=236, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-23T17:11:31,733 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithChecksum' from region states. 2024-11-23T17:11:31,733 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1732381825401.afb0f86b4bfaa9a4dd331ad4691ba018.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732381891733"}]},"ts":"9223372036854775807"} 2024-11-23T17:11:31,733 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1732381825401.de3ad9c9965ac0ad50fdb7f1172460e5.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732381891733"}]},"ts":"9223372036854775807"} 2024-11-23T17:11:31,735 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-23T17:11:31,735 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => afb0f86b4bfaa9a4dd331ad4691ba018, NAME => 'testtb-testExportWithChecksum,,1732381825401.afb0f86b4bfaa9a4dd331ad4691ba018.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => de3ad9c9965ac0ad50fdb7f1172460e5, NAME => 'testtb-testExportWithChecksum,1,1732381825401.de3ad9c9965ac0ad50fdb7f1172460e5.', STARTKEY => '1', ENDKEY => ''}] 2024-11-23T17:11:31,736 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithChecksum' as deleted. 
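The procedure chain running through these entries (DisableTableProcedure pid=230 with its CloseTableRegions/TransitRegionState/CloseRegion children, then DeleteTableProcedure pid=236 archiving the region and mob files) is the master-side execution of the ordinary client-driven cleanup the test performs once the export has been verified: disable the table, delete it, and drop the snapshots. A minimal sketch of the equivalent calls through the public Admin API, with the connection setup illustrative and the table and snapshot names taken from the log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotTestCleanupExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("testtb-testExportWithChecksum");
            // DisableTableProcedure (pid=230) and DeleteTableProcedure (pid=236)
            // in the log are the master-side halves of these two calls.
            admin.disableTable(table);
            admin.deleteTable(table);
            // The MasterRpcServices "delete name: ..." entries that follow
            // correspond to dropping the snapshots taken earlier in the test.
            admin.deleteSnapshot("emptySnaptb0-testExportWithChecksum");
            admin.deleteSnapshot("snaptb0-testExportWithChecksum");
        }
    }
}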
2024-11-23T17:11:31,736 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732381891736"}]},"ts":"9223372036854775807"} 2024-11-23T17:11:31,738 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithChecksum state from META 2024-11-23T17:11:31,740 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=236, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-23T17:11:31,741 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=236, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum in 61 msec 2024-11-23T17:11:31,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=236 2024-11-23T17:11:31,808 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithChecksum 2024-11-23T17:11:31,809 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-23T17:11:31,816 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithChecksum" type: DISABLED 2024-11-23T17:11:31,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-11-23T17:11:31,819 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithChecksum" type: DISABLED 2024-11-23T17:11:31,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithChecksum 2024-11-23T17:11:31,847 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithChecksum Thread=830 (was 821) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:37843 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: PacketResponder: BP-1942416587-172.17.0.2-1732381530223:blk_1073742373_1549, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-93701526_22 at /127.0.0.1:36664 [Receiving block BP-1942416587-172.17.0.2-1732381530223:blk_1073742373_1549] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client 
DFSClient_NONMAPREDUCE_-321788833_1 at /127.0.0.1:40068 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-321788833_1 at /127.0.0.1:44824 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 32144) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1293158403_22 at /127.0.0.1:54822 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_META-regionserver/1a7c5470b472:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-24 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-9112 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging 
thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37843 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1293158403_22 at /127.0.0.1:40342 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-25 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1293158403_22 at /127.0.0.1:41302 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #5 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1942416587-172.17.0.2-1732381530223:blk_1073742373_1549, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-0-hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33-prefix:1a7c5470b472,44711,1732381534872.meta java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-93701526_22 at /127.0.0.1:53984 [Receiving block BP-1942416587-172.17.0.2-1732381530223:blk_1073742373_1549] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ContainersLauncher #4 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-93701526_22 at /127.0.0.1:33798 [Receiving block BP-1942416587-172.17.0.2-1732381530223:blk_1073742373_1549] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1942416587-172.17.0.2-1732381530223:blk_1073742373_1549, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=843 (was 815) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=854 (was 515) - SystemLoadAverage LEAK? -, ProcessCount=27 (was 17) - ProcessCount LEAK? -, AvailableMemoryMB=1922 (was 2446) 2024-11-23T17:11:31,847 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=830 is superior to 500 2024-11-23T17:11:31,868 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=830, OpenFileDescriptor=843, MaxFileDescriptor=1048576, SystemLoadAverage=854, ProcessCount=26, AvailableMemoryMB=1921 2024-11-23T17:11:31,868 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=830 is superior to 500 2024-11-23T17:11:31,870 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T17:11:31,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=237, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:31,872 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T17:11:31,873 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSkipTmp" procId is: 237 2024-11-23T17:11:31,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=237 2024-11-23T17:11:31,874 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T17:11:31,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742417_1593 (size=454) 2024-11-23T17:11:31,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742417_1593 (size=454) 2024-11-23T17:11:31,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742417_1593 (size=454) 2024-11-23T17:11:31,883 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 77e5cce4b78d7dce9f55ede2c0fab96a, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1732381891870.77e5cce4b78d7dce9f55ede2c0fab96a.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:11:31,884 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 11652adc4e2c2498a5b29c501cdb4e49, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1732381891870.11652adc4e2c2498a5b29c501cdb4e49.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:11:31,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742418_1594 (size=79) 2024-11-23T17:11:31,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742418_1594 (size=79) 2024-11-23T17:11:31,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742418_1594 (size=79) 2024-11-23T17:11:31,902 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1732381891870.11652adc4e2c2498a5b29c501cdb4e49.; StoreHotnessProtector, 
parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:11:31,902 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1722): Closing 11652adc4e2c2498a5b29c501cdb4e49, disabling compactions & flushes 2024-11-23T17:11:31,902 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1732381891870.11652adc4e2c2498a5b29c501cdb4e49. 2024-11-23T17:11:31,902 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1732381891870.11652adc4e2c2498a5b29c501cdb4e49. 2024-11-23T17:11:31,902 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1732381891870.11652adc4e2c2498a5b29c501cdb4e49. after waiting 0 ms 2024-11-23T17:11:31,902 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1732381891870.11652adc4e2c2498a5b29c501cdb4e49. 2024-11-23T17:11:31,902 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1732381891870.11652adc4e2c2498a5b29c501cdb4e49. 2024-11-23T17:11:31,902 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1676): Region close journal for 11652adc4e2c2498a5b29c501cdb4e49: Waiting for close lock at 1732381891902Disabling compacts and flushes for region at 1732381891902Disabling writes for close at 1732381891902Writing region close event to WAL at 1732381891902Closed at 1732381891902 2024-11-23T17:11:31,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742419_1595 (size=79) 2024-11-23T17:11:31,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742419_1595 (size=79) 2024-11-23T17:11:31,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742419_1595 (size=79) 2024-11-23T17:11:31,905 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1732381891870.77e5cce4b78d7dce9f55ede2c0fab96a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:11:31,905 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1722): Closing 77e5cce4b78d7dce9f55ede2c0fab96a, disabling compactions & flushes 2024-11-23T17:11:31,905 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1732381891870.77e5cce4b78d7dce9f55ede2c0fab96a. 
2024-11-23T17:11:31,905 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1732381891870.77e5cce4b78d7dce9f55ede2c0fab96a. 2024-11-23T17:11:31,905 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1732381891870.77e5cce4b78d7dce9f55ede2c0fab96a. after waiting 0 ms 2024-11-23T17:11:31,905 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1732381891870.77e5cce4b78d7dce9f55ede2c0fab96a. 2024-11-23T17:11:31,905 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1732381891870.77e5cce4b78d7dce9f55ede2c0fab96a. 2024-11-23T17:11:31,905 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1676): Region close journal for 77e5cce4b78d7dce9f55ede2c0fab96a: Waiting for close lock at 1732381891905Disabling compacts and flushes for region at 1732381891905Disabling writes for close at 1732381891905Writing region close event to WAL at 1732381891905Closed at 1732381891905 2024-11-23T17:11:31,906 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T17:11:31,906 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1732381891870.11652adc4e2c2498a5b29c501cdb4e49.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1732381891906"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732381891906"}]},"ts":"1732381891906"} 2024-11-23T17:11:31,907 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1732381891870.77e5cce4b78d7dce9f55ede2c0fab96a.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1732381891906"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732381891906"}]},"ts":"1732381891906"} 2024-11-23T17:11:31,909 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-11-23T17:11:31,911 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T17:11:31,911 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381891911"}]},"ts":"1732381891911"} 2024-11-23T17:11:31,913 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-11-23T17:11:31,914 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {1a7c5470b472=0} racks are {/default-rack=0} 2024-11-23T17:11:31,915 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T17:11:31,915 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T17:11:31,915 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T17:11:31,915 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T17:11:31,915 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T17:11:31,915 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T17:11:31,915 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T17:11:31,915 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T17:11:31,915 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T17:11:31,915 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T17:11:31,916 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=238, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=77e5cce4b78d7dce9f55ede2c0fab96a, ASSIGN}, {pid=239, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=11652adc4e2c2498a5b29c501cdb4e49, ASSIGN}] 2024-11-23T17:11:31,917 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=239, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=11652adc4e2c2498a5b29c501cdb4e49, ASSIGN 2024-11-23T17:11:31,918 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=238, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=77e5cce4b78d7dce9f55ede2c0fab96a, ASSIGN 2024-11-23T17:11:31,918 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=239, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=11652adc4e2c2498a5b29c501cdb4e49, ASSIGN; state=OFFLINE, location=1a7c5470b472,43453,1732381534825; forceNewPlan=false, retain=false 2024-11-23T17:11:31,919 INFO 
[PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=238, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=77e5cce4b78d7dce9f55ede2c0fab96a, ASSIGN; state=OFFLINE, location=1a7c5470b472,44711,1732381534872; forceNewPlan=false, retain=false 2024-11-23T17:11:31,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=237 2024-11-23T17:11:32,070 INFO [1a7c5470b472:46159 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-23T17:11:32,071 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=239 updating hbase:meta row=11652adc4e2c2498a5b29c501cdb4e49, regionState=OPENING, regionLocation=1a7c5470b472,43453,1732381534825 2024-11-23T17:11:32,071 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=238 updating hbase:meta row=77e5cce4b78d7dce9f55ede2c0fab96a, regionState=OPENING, regionLocation=1a7c5470b472,44711,1732381534872 2024-11-23T17:11:32,075 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=239, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=11652adc4e2c2498a5b29c501cdb4e49, ASSIGN because future has completed 2024-11-23T17:11:32,075 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=240, ppid=239, state=RUNNABLE, hasLock=false; OpenRegionProcedure 11652adc4e2c2498a5b29c501cdb4e49, server=1a7c5470b472,43453,1732381534825}] 2024-11-23T17:11:32,076 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=238, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=77e5cce4b78d7dce9f55ede2c0fab96a, ASSIGN because future has completed 2024-11-23T17:11:32,076 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=241, ppid=238, state=RUNNABLE, hasLock=false; OpenRegionProcedure 77e5cce4b78d7dce9f55ede2c0fab96a, server=1a7c5470b472,44711,1732381534872}] 2024-11-23T17:11:32,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=237 2024-11-23T17:11:32,233 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,1,1732381891870.11652adc4e2c2498a5b29c501cdb4e49. 2024-11-23T17:11:32,233 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(7752): Opening region: {ENCODED => 11652adc4e2c2498a5b29c501cdb4e49, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1732381891870.11652adc4e2c2498a5b29c501cdb4e49.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T17:11:32,233 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1732381891870.11652adc4e2c2498a5b29c501cdb4e49. 
service=AccessControlService 2024-11-23T17:11:32,234 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:11:32,234 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 11652adc4e2c2498a5b29c501cdb4e49 2024-11-23T17:11:32,234 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1732381891870.11652adc4e2c2498a5b29c501cdb4e49.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:11:32,234 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(7794): checking encryption for 11652adc4e2c2498a5b29c501cdb4e49 2024-11-23T17:11:32,234 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(7797): checking classloading for 11652adc4e2c2498a5b29c501cdb4e49 2024-11-23T17:11:32,243 INFO [StoreOpener-11652adc4e2c2498a5b29c501cdb4e49-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 11652adc4e2c2498a5b29c501cdb4e49 2024-11-23T17:11:32,245 INFO [StoreOpener-11652adc4e2c2498a5b29c501cdb4e49-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 11652adc4e2c2498a5b29c501cdb4e49 columnFamilyName cf 2024-11-23T17:11:32,245 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,,1732381891870.77e5cce4b78d7dce9f55ede2c0fab96a. 2024-11-23T17:11:32,246 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(7752): Opening region: {ENCODED => 77e5cce4b78d7dce9f55ede2c0fab96a, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1732381891870.77e5cce4b78d7dce9f55ede2c0fab96a.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T17:11:32,246 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1732381891870.77e5cce4b78d7dce9f55ede2c0fab96a. 
service=AccessControlService 2024-11-23T17:11:32,246 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:11:32,246 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 77e5cce4b78d7dce9f55ede2c0fab96a 2024-11-23T17:11:32,246 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1732381891870.77e5cce4b78d7dce9f55ede2c0fab96a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:11:32,246 DEBUG [StoreOpener-11652adc4e2c2498a5b29c501cdb4e49-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:11:32,246 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(7794): checking encryption for 77e5cce4b78d7dce9f55ede2c0fab96a 2024-11-23T17:11:32,246 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(7797): checking classloading for 77e5cce4b78d7dce9f55ede2c0fab96a 2024-11-23T17:11:32,247 INFO [StoreOpener-11652adc4e2c2498a5b29c501cdb4e49-1 {}] regionserver.HStore(327): Store=11652adc4e2c2498a5b29c501cdb4e49/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:11:32,247 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1038): replaying wal for 11652adc4e2c2498a5b29c501cdb4e49 2024-11-23T17:11:32,248 INFO [StoreOpener-77e5cce4b78d7dce9f55ede2c0fab96a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 77e5cce4b78d7dce9f55ede2c0fab96a 2024-11-23T17:11:32,248 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/11652adc4e2c2498a5b29c501cdb4e49 2024-11-23T17:11:32,248 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/11652adc4e2c2498a5b29c501cdb4e49 2024-11-23T17:11:32,249 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1048): stopping wal replay for 11652adc4e2c2498a5b29c501cdb4e49 2024-11-23T17:11:32,249 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1060): Cleaning up temporary data for 
11652adc4e2c2498a5b29c501cdb4e49 2024-11-23T17:11:32,249 INFO [StoreOpener-77e5cce4b78d7dce9f55ede2c0fab96a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 77e5cce4b78d7dce9f55ede2c0fab96a columnFamilyName cf 2024-11-23T17:11:32,249 DEBUG [StoreOpener-77e5cce4b78d7dce9f55ede2c0fab96a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:11:32,250 INFO [StoreOpener-77e5cce4b78d7dce9f55ede2c0fab96a-1 {}] regionserver.HStore(327): Store=77e5cce4b78d7dce9f55ede2c0fab96a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:11:32,250 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1093): writing seq id for 11652adc4e2c2498a5b29c501cdb4e49 2024-11-23T17:11:32,250 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1038): replaying wal for 77e5cce4b78d7dce9f55ede2c0fab96a 2024-11-23T17:11:32,251 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/77e5cce4b78d7dce9f55ede2c0fab96a 2024-11-23T17:11:32,251 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/77e5cce4b78d7dce9f55ede2c0fab96a 2024-11-23T17:11:32,251 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/11652adc4e2c2498a5b29c501cdb4e49/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:11:32,252 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1048): stopping wal replay for 77e5cce4b78d7dce9f55ede2c0fab96a 2024-11-23T17:11:32,252 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1114): Opened 11652adc4e2c2498a5b29c501cdb4e49; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69929629, jitterRate=0.042032673954963684}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:11:32,252 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 
{event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1060): Cleaning up temporary data for 77e5cce4b78d7dce9f55ede2c0fab96a 2024-11-23T17:11:32,252 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 11652adc4e2c2498a5b29c501cdb4e49 2024-11-23T17:11:32,253 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1006): Region open journal for 11652adc4e2c2498a5b29c501cdb4e49: Running coprocessor pre-open hook at 1732381892234Writing region info on filesystem at 1732381892234Initializing all the Stores at 1732381892235 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381892235Cleaning up temporary data from old regions at 1732381892249 (+14 ms)Running coprocessor post-open hooks at 1732381892252 (+3 ms)Region opened successfully at 1732381892252 2024-11-23T17:11:32,253 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1732381891870.11652adc4e2c2498a5b29c501cdb4e49., pid=240, masterSystemTime=1732381892227 2024-11-23T17:11:32,254 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1093): writing seq id for 77e5cce4b78d7dce9f55ede2c0fab96a 2024-11-23T17:11:32,255 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1732381891870.11652adc4e2c2498a5b29c501cdb4e49. 2024-11-23T17:11:32,255 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1732381891870.11652adc4e2c2498a5b29c501cdb4e49. 
2024-11-23T17:11:32,256 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/77e5cce4b78d7dce9f55ede2c0fab96a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:11:32,256 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=239 updating hbase:meta row=11652adc4e2c2498a5b29c501cdb4e49, regionState=OPEN, openSeqNum=2, regionLocation=1a7c5470b472,43453,1732381534825 2024-11-23T17:11:32,256 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1114): Opened 77e5cce4b78d7dce9f55ede2c0fab96a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59695412, jitterRate=-0.1104690432548523}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:11:32,256 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 77e5cce4b78d7dce9f55ede2c0fab96a 2024-11-23T17:11:32,256 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1006): Region open journal for 77e5cce4b78d7dce9f55ede2c0fab96a: Running coprocessor pre-open hook at 1732381892247Writing region info on filesystem at 1732381892247Initializing all the Stores at 1732381892247Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732381892247Cleaning up temporary data from old regions at 1732381892252 (+5 ms)Running coprocessor post-open hooks at 1732381892256 (+4 ms)Region opened successfully at 1732381892256 2024-11-23T17:11:32,257 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1732381891870.77e5cce4b78d7dce9f55ede2c0fab96a., pid=241, masterSystemTime=1732381892229 2024-11-23T17:11:32,259 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=240, ppid=239, state=RUNNABLE, hasLock=false; OpenRegionProcedure 11652adc4e2c2498a5b29c501cdb4e49, server=1a7c5470b472,43453,1732381534825 because future has completed 2024-11-23T17:11:32,260 DEBUG [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1732381891870.77e5cce4b78d7dce9f55ede2c0fab96a. 2024-11-23T17:11:32,260 INFO [RS_OPEN_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,,1732381891870.77e5cce4b78d7dce9f55ede2c0fab96a. 
2024-11-23T17:11:32,261 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=238 updating hbase:meta row=77e5cce4b78d7dce9f55ede2c0fab96a, regionState=OPEN, openSeqNum=2, regionLocation=1a7c5470b472,44711,1732381534872 2024-11-23T17:11:32,262 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=240, resume processing ppid=239 2024-11-23T17:11:32,262 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=240, ppid=239, state=SUCCESS, hasLock=false; OpenRegionProcedure 11652adc4e2c2498a5b29c501cdb4e49, server=1a7c5470b472,43453,1732381534825 in 185 msec 2024-11-23T17:11:32,263 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=241, ppid=238, state=RUNNABLE, hasLock=false; OpenRegionProcedure 77e5cce4b78d7dce9f55ede2c0fab96a, server=1a7c5470b472,44711,1732381534872 because future has completed 2024-11-23T17:11:32,264 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=239, ppid=237, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=11652adc4e2c2498a5b29c501cdb4e49, ASSIGN in 346 msec 2024-11-23T17:11:32,265 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=241, resume processing ppid=238 2024-11-23T17:11:32,265 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=241, ppid=238, state=SUCCESS, hasLock=false; OpenRegionProcedure 77e5cce4b78d7dce9f55ede2c0fab96a, server=1a7c5470b472,44711,1732381534872 in 188 msec 2024-11-23T17:11:32,267 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=238, resume processing ppid=237 2024-11-23T17:11:32,267 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=238, ppid=237, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=77e5cce4b78d7dce9f55ede2c0fab96a, ASSIGN in 349 msec 2024-11-23T17:11:32,268 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T17:11:32,268 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381892268"}]},"ts":"1732381892268"} 2024-11-23T17:11:32,270 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-11-23T17:11:32,272 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T17:11:32,273 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA 2024-11-23T17:11:32,287 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37875 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-23T17:11:32,290 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:11:32,290 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:11:32,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:11:32,291 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:11:32,301 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:11:32,301 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:11:32,302 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-23T17:11:32,302 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-23T17:11:32,302 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:11:32,302 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-23T17:11:32,302 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:11:32,302 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-23T17:11:32,302 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=237, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 426 msec 2024-11-23T17:11:32,336 DEBUG [HBase-Metrics2-1 {}] 
regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region d701f2f82212e8f3b780ec9dae20e678, had cached 0 bytes from a total of 5888 2024-11-23T17:11:32,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=237 2024-11-23T17:11:32,498 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-23T17:11:32,498 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-23T17:11:32,499 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37875 {}] ipc.CallRunner(138): callId: 529 service: ClientService methodName: Scan size: 210 connection: 172.17.0.2:58404 deadline: 1732381952498, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=1a7c5470b472 port=44711 startCode=1732381534872. As of locationSeqNum=211. 2024-11-23T17:11:32,500 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1 , the old value is region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=1a7c5470b472 port=44711 startCode=1732381534872. As of locationSeqNum=211. 2024-11-23T17:11:32,500 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=1a7c5470b472 port=44711 startCode=1732381534872. As of locationSeqNum=211. 2024-11-23T17:11:32,500 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(84): Try updating region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,37875,1732381534675, seqNum=-1 with the new location region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,44711,1732381534872, seqNum=211 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=1a7c5470b472 port=44711 startCode=1732381534872. As of locationSeqNum=211. 2024-11-23T17:11:32,611 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:32,612 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1732381891870.77e5cce4b78d7dce9f55ede2c0fab96a. 
2024-11-23T17:11:32,612 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:11:32,616 DEBUG [RPCClient-NioEventLoopGroup-6-13 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-23T17:11:32,625 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-23T17:11:32,634 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-23T17:11:32,639 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-23T17:11:32,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732381892639 (current time:1732381892639). 2024-11-23T17:11:32,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:11:32,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-11-23T17:11:32,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:11:32,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@52c3aa57, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:11:32,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:11:32,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:11:32,656 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:11:32,656 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:11:32,656 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:11:32,656 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50d89e70, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:11:32,656 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:11:32,657 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:11:32,657 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:11:32,658 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42948, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:11:32,659 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66ff4df7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:11:32,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:11:32,660 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,44711,1732381534872, seqNum=-1] 2024-11-23T17:11:32,661 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:11:32,662 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38898, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:11:32,670 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159. 
2024-11-23T17:11:32,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:11:32,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:11:32,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:11:32,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c02ee51, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:11:32,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:11:32,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:11:32,674 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:11:32,675 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:11:32,675 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:11:32,675 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@141de56, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:11:32,675 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:11:32,675 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:11:32,675 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:11:32,676 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42956, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:11:32,677 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ca17dbf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:11:32,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:11:32,679 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,44711,1732381534872, seqNum=-1] 2024-11-23T17:11:32,679 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:11:32,680 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:11:32,680 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38902, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:11:32,683 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce., hostname=1a7c5470b472,37875,1732381534675, seqNum=2] 2024-11-23T17:11:32,683 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:11:32,684 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59432, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:11:32,686 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159. 
2024-11-23T17:11:32,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor224.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:11:32,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:11:32,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:11:32,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-23T17:11:32,687 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:11:32,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-23T17:11:32,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=242, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-23T17:11:32,691 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:11:32,692 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:11:32,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 242 2024-11-23T17:11:32,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-11-23T17:11:32,695 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:11:32,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742420_1596 (size=203) 2024-11-23T17:11:32,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742420_1596 (size=203) 2024-11-23T17:11:32,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742420_1596 (size=203) 2024-11-23T17:11:32,717 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:11:32,718 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 77e5cce4b78d7dce9f55ede2c0fab96a}, {pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 11652adc4e2c2498a5b29c501cdb4e49}] 2024-11-23T17:11:32,719 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 77e5cce4b78d7dce9f55ede2c0fab96a 2024-11-23T17:11:32,719 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for 
pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 11652adc4e2c2498a5b29c501cdb4e49 2024-11-23T17:11:32,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-11-23T17:11:32,872 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44711 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=243 2024-11-23T17:11:32,872 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1732381891870.77e5cce4b78d7dce9f55ede2c0fab96a. 2024-11-23T17:11:32,872 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43453 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=244 2024-11-23T17:11:32,873 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HRegion(2603): Flush status journal for 77e5cce4b78d7dce9f55ede2c0fab96a: 2024-11-23T17:11:32,873 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1732381891870.11652adc4e2c2498a5b29c501cdb4e49. 2024-11-23T17:11:32,873 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1732381891870.77e5cce4b78d7dce9f55ede2c0fab96a. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-23T17:11:32,873 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HRegion(2603): Flush status journal for 11652adc4e2c2498a5b29c501cdb4e49: 2024-11-23T17:11:32,873 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1732381891870.11652adc4e2c2498a5b29c501cdb4e49. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-23T17:11:32,873 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1732381891870.77e5cce4b78d7dce9f55ede2c0fab96a.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:32,873 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:11:32,873 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:11:32,873 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1732381891870.11652adc4e2c2498a5b29c501cdb4e49.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:32,873 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:11:32,873 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:11:32,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742421_1597 (size=82) 2024-11-23T17:11:32,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742421_1597 (size=82) 2024-11-23T17:11:32,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742421_1597 (size=82) 2024-11-23T17:11:32,905 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1732381891870.77e5cce4b78d7dce9f55ede2c0fab96a. 2024-11-23T17:11:32,905 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=243 2024-11-23T17:11:32,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=243 2024-11-23T17:11:32,906 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 77e5cce4b78d7dce9f55ede2c0fab96a 2024-11-23T17:11:32,906 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 77e5cce4b78d7dce9f55ede2c0fab96a 2024-11-23T17:11:32,909 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=243, ppid=242, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 77e5cce4b78d7dce9f55ede2c0fab96a in 190 msec 2024-11-23T17:11:32,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742422_1598 (size=82) 2024-11-23T17:11:32,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742422_1598 (size=82) 2024-11-23T17:11:32,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742422_1598 (size=82) 2024-11-23T17:11:32,942 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1732381891870.11652adc4e2c2498a5b29c501cdb4e49. 
2024-11-23T17:11:32,942 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=244 2024-11-23T17:11:32,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=244 2024-11-23T17:11:32,943 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 11652adc4e2c2498a5b29c501cdb4e49 2024-11-23T17:11:32,944 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 11652adc4e2c2498a5b29c501cdb4e49 2024-11-23T17:11:32,948 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=244, resume processing ppid=242 2024-11-23T17:11:32,948 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:11:32,948 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=244, ppid=242, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 11652adc4e2c2498a5b29c501cdb4e49 in 227 msec 2024-11-23T17:11:32,949 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:11:32,957 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-23T17:11:32,957 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-23T17:11:32,957 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:11:32,958 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-23T17:11:32,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742423_1599 (size=74) 2024-11-23T17:11:32,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742423_1599 (size=74) 2024-11-23T17:11:32,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742423_1599 (size=74) 2024-11-23T17:11:33,000 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:11:33,000 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:33,001 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:33,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-11-23T17:11:33,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742424_1600 (size=697) 2024-11-23T17:11:33,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742424_1600 (size=697) 2024-11-23T17:11:33,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742424_1600 (size=697) 2024-11-23T17:11:33,045 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:11:33,064 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:11:33,065 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:33,067 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:11:33,067 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 242 2024-11-23T17:11:33,068 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=242, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 379 msec 2024-11-23T17:11:33,179 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T17:11:33,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-11-23T17:11:33,319 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-23T17:11:33,328 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44711 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1732381891870.77e5cce4b78d7dce9f55ede2c0fab96a. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:11:33,329 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43453 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1732381891870.11652adc4e2c2498a5b29c501cdb4e49. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:11:33,331 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-23T17:11:33,334 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:33,335 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1732381891870.77e5cce4b78d7dce9f55ede2c0fab96a. 
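For reference, snapshots such as emptySnaptb0-testExportFileSystemStateWithSkipTmp and snaptb0-testExportFileSystemStateWithSkipTmp in this run are requested through the standard HBase client snapshot API. The sketch below is illustrative only; it is not taken from this test run, and it assumes a reachable cluster whose hbase-site.xml is on the classpath.

// Illustrative sketch only -- not part of this log. Shows how a client would
// request a FLUSH-type snapshot of the table seen above and then list snapshots.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // type=FLUSH (as in the log) flushes online regions before capturing
      // store files; snapshot names must be unique within the cluster.
      admin.snapshot("snaptb0-testExportFileSystemStateWithSkipTmp",
          TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
          SnapshotType.FLUSH);
      // Confirm the new snapshot is registered.
      admin.listSnapshots().forEach(d -> System.out.println(d.getName()));
    }
  }
}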
2024-11-23T17:11:33,335 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:11:33,337 DEBUG [RPCClient-NioEventLoopGroup-6-13 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-23T17:11:33,347 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-23T17:11:33,354 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-23T17:11:33,358 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-23T17:11:33,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732381893359 (current time:1732381893359). 2024-11-23T17:11:33,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:11:33,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-11-23T17:11:33,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:11:33,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@381ebeee, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:11:33,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:11:33,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:11:33,361 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:11:33,361 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:11:33,361 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:11:33,361 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1efb8ac3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:11:33,361 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:11:33,361 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:11:33,362 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:11:33,363 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42980, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:11:33,363 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6670407, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:11:33,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:11:33,364 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,44711,1732381534872, seqNum=-1] 2024-11-23T17:11:33,365 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:11:33,366 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38918, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:11:33,368 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159. 
2024-11-23T17:11:33,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:11:33,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:11:33,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:11:33,370 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:11:33,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b23f899, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:11:33,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ClusterIdFetcher(90): Going to request 1a7c5470b472,46159,-1 for getting cluster id 2024-11-23T17:11:33,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:11:33,373 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '67f7774a-a655-4a94-8a2f-94d7ca986550' 2024-11-23T17:11:33,374 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:11:33,374 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "67f7774a-a655-4a94-8a2f-94d7ca986550" 2024-11-23T17:11:33,374 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2720bb5f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:11:33,374 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [1a7c5470b472,46159,-1] 2024-11-23T17:11:33,374 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:11:33,374 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:11:33,375 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43008, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:11:33,376 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@772afaec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:11:33,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:11:33,378 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=1a7c5470b472,44711,1732381534872, seqNum=-1] 2024-11-23T17:11:33,378 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:11:33,379 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38922, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:11:33,381 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce., hostname=1a7c5470b472,37875,1732381534675, seqNum=2] 2024-11-23T17:11:33,381 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:11:33,383 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59448, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:11:33,384 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159. 
2024-11-23T17:11:33,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor224.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:11:33,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:11:33,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:11:33,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-23T17:11:33,384 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:11:33,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
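The entries above show the master-side validation for this snapshot request: the security/ACL check in SnapshotDescriptionUtils opens and closes a short-lived connection, PermissionStorage reads the acl entry for the table, and SnapshotManager finds no existing snapshot before the SnapshotProcedure is registered below. For orientation, here is a minimal client-side sketch of how a FLUSH snapshot like the one in this run is requested through the public Admin API; the configuration and connection wiring are placeholders, not code from this test.

    // Minimal sketch (placeholder setup): request a table snapshot through the
    // Admin API. This is the client-side counterpart of the
    // MasterRpcServices.snapshot() frames visible in the stack traces above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotRequestSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // cluster settings assumed to be on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // For an enabled table this takes a FLUSH-type snapshot by default.
          admin.snapshot("snaptb0-testExportFileSystemStateWithSkipTmp",
              TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"));
        }
      }
    }

The call blocks until the master-side procedure completes; the repeated "Checking to see if procedure is done pid=245" entries that follow are the master answering that client-side polling.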
2024-11-23T17:11:33,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=245, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-23T17:11:33,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 245 2024-11-23T17:11:33,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-11-23T17:11:33,388 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:11:33,391 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:11:33,394 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:11:33,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742425_1601 (size=198) 2024-11-23T17:11:33,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742425_1601 (size=198) 2024-11-23T17:11:33,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742425_1601 (size=198) 2024-11-23T17:11:33,442 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:11:33,442 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=246, ppid=245, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 77e5cce4b78d7dce9f55ede2c0fab96a}, {pid=247, ppid=245, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 11652adc4e2c2498a5b29c501cdb4e49}] 2024-11-23T17:11:33,443 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=247, ppid=245, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 11652adc4e2c2498a5b29c501cdb4e49 2024-11-23T17:11:33,444 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=246, ppid=245, 
state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 77e5cce4b78d7dce9f55ede2c0fab96a 2024-11-23T17:11:33,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-11-23T17:11:33,596 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44711 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=246 2024-11-23T17:11:33,596 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43453 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=247 2024-11-23T17:11:33,596 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1732381891870.11652adc4e2c2498a5b29c501cdb4e49. 2024-11-23T17:11:33,596 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1732381891870.77e5cce4b78d7dce9f55ede2c0fab96a. 2024-11-23T17:11:33,596 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.HRegion(2902): Flushing 77e5cce4b78d7dce9f55ede2c0fab96a 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-23T17:11:33,596 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.HRegion(2902): Flushing 11652adc4e2c2498a5b29c501cdb4e49 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-23T17:11:33,646 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411237248a953eb3a4c9083d01724a50f724b_77e5cce4b78d7dce9f55ede2c0fab96a is 71, key is 0923d8ef90e7209c8462529e5ad36342/cf:q/1732381893328/Put/seqid=0 2024-11-23T17:11:33,648 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024112364ab05fe9dcd4ac0a7d6ea02f636c43e_11652adc4e2c2498a5b29c501cdb4e49 is 71, key is 11d9d7264d5652f59267981e7ad196a1/cf:q/1732381893329/Put/seqid=0 2024-11-23T17:11:33,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742426_1602 (size=5102) 2024-11-23T17:11:33,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742426_1602 (size=5102) 2024-11-23T17:11:33,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742426_1602 (size=5102) 2024-11-23T17:11:33,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T17:11:33,684 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411237248a953eb3a4c9083d01724a50f724b_77e5cce4b78d7dce9f55ede2c0fab96a to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e202411237248a953eb3a4c9083d01724a50f724b_77e5cce4b78d7dce9f55ede2c0fab96a 2024-11-23T17:11:33,686 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/77e5cce4b78d7dce9f55ede2c0fab96a/.tmp/cf/20a8139982f540078d5c340067baeaad, store: [table=testtb-testExportFileSystemStateWithSkipTmp family=cf region=77e5cce4b78d7dce9f55ede2c0fab96a] 2024-11-23T17:11:33,686 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/77e5cce4b78d7dce9f55ede2c0fab96a/.tmp/cf/20a8139982f540078d5c340067baeaad is 220, key is 0651ddd8a5d9029906ea8d8080f771ba4/cf:q/1732381893328/Put/seqid=0 2024-11-23T17:11:33,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-11-23T17:11:33,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742427_1603 (size=8171) 2024-11-23T17:11:33,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742427_1603 (size=8171) 2024-11-23T17:11:33,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742427_1603 (size=8171) 2024-11-23T17:11:33,717 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:11:33,722 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024112364ab05fe9dcd4ac0a7d6ea02f636c43e_11652adc4e2c2498a5b29c501cdb4e49 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b2024112364ab05fe9dcd4ac0a7d6ea02f636c43e_11652adc4e2c2498a5b29c501cdb4e49 2024-11-23T17:11:33,723 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/11652adc4e2c2498a5b29c501cdb4e49/.tmp/cf/fcd82d2a8e844139996b3fe5ff7a8489, store: [table=testtb-testExportFileSystemStateWithSkipTmp family=cf region=11652adc4e2c2498a5b29c501cdb4e49] 2024-11-23T17:11:33,724 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/11652adc4e2c2498a5b29c501cdb4e49/.tmp/cf/fcd82d2a8e844139996b3fe5ff7a8489 is 220, key is 18d205d4d8def942d6e4c383c7590245c/cf:q/1732381893329/Put/seqid=0 2024-11-23T17:11:33,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742428_1604 (size=5962) 2024-11-23T17:11:33,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742428_1604 (size=5962) 2024-11-23T17:11:33,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742428_1604 (size=5962) 2024-11-23T17:11:33,735 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/77e5cce4b78d7dce9f55ede2c0fab96a/.tmp/cf/20a8139982f540078d5c340067baeaad 2024-11-23T17:11:33,753 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/77e5cce4b78d7dce9f55ede2c0fab96a/.tmp/cf/20a8139982f540078d5c340067baeaad as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/77e5cce4b78d7dce9f55ede2c0fab96a/cf/20a8139982f540078d5c340067baeaad 2024-11-23T17:11:33,760 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/77e5cce4b78d7dce9f55ede2c0fab96a/cf/20a8139982f540078d5c340067baeaad, entries=3, sequenceid=6, filesize=5.8 K 2024-11-23T17:11:33,763 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 77e5cce4b78d7dce9f55ede2c0fab96a in 167ms, sequenceid=6, compaction requested=false 2024-11-23T17:11:33,763 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-11-23T17:11:33,764 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.HRegion(2603): Flush 
status journal for 77e5cce4b78d7dce9f55ede2c0fab96a: 2024-11-23T17:11:33,764 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1732381891870.77e5cce4b78d7dce9f55ede2c0fab96a. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-23T17:11:33,764 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1732381891870.77e5cce4b78d7dce9f55ede2c0fab96a.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:33,764 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:11:33,764 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/77e5cce4b78d7dce9f55ede2c0fab96a/cf/20a8139982f540078d5c340067baeaad] hfiles 2024-11-23T17:11:33,764 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/77e5cce4b78d7dce9f55ede2c0fab96a/cf/20a8139982f540078d5c340067baeaad for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:33,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742429_1605 (size=15525) 2024-11-23T17:11:33,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742429_1605 (size=15525) 2024-11-23T17:11:33,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742429_1605 (size=15525) 2024-11-23T17:11:33,770 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/11652adc4e2c2498a5b29c501cdb4e49/.tmp/cf/fcd82d2a8e844139996b3fe5ff7a8489 2024-11-23T17:11:33,783 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/11652adc4e2c2498a5b29c501cdb4e49/.tmp/cf/fcd82d2a8e844139996b3fe5ff7a8489 as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/11652adc4e2c2498a5b29c501cdb4e49/cf/fcd82d2a8e844139996b3fe5ff7a8489 2024-11-23T17:11:33,794 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/11652adc4e2c2498a5b29c501cdb4e49/cf/fcd82d2a8e844139996b3fe5ff7a8489, entries=47, sequenceid=6, filesize=15.2 K 2024-11-23T17:11:33,795 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 11652adc4e2c2498a5b29c501cdb4e49 in 198ms, sequenceid=6, compaction requested=false 2024-11-23T17:11:33,795 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.HRegion(2603): Flush status journal for 11652adc4e2c2498a5b29c501cdb4e49: 2024-11-23T17:11:33,795 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1732381891870.11652adc4e2c2498a5b29c501cdb4e49. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-23T17:11:33,795 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1732381891870.11652adc4e2c2498a5b29c501cdb4e49.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:33,795 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:11:33,795 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/11652adc4e2c2498a5b29c501cdb4e49/cf/fcd82d2a8e844139996b3fe5ff7a8489] hfiles 2024-11-23T17:11:33,795 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/11652adc4e2c2498a5b29c501cdb4e49/cf/fcd82d2a8e844139996b3fe5ff7a8489 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:33,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742430_1606 (size=121) 2024-11-23T17:11:33,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742430_1606 (size=121) 2024-11-23T17:11:33,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742430_1606 (size=121) 2024-11-23T17:11:33,814 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1732381891870.77e5cce4b78d7dce9f55ede2c0fab96a. 
2024-11-23T17:11:33,814 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=246 2024-11-23T17:11:33,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=246 2024-11-23T17:11:33,815 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 77e5cce4b78d7dce9f55ede2c0fab96a 2024-11-23T17:11:33,815 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=246, ppid=245, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 77e5cce4b78d7dce9f55ede2c0fab96a 2024-11-23T17:11:33,818 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=246, ppid=245, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 77e5cce4b78d7dce9f55ede2c0fab96a in 375 msec 2024-11-23T17:11:33,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742431_1607 (size=121) 2024-11-23T17:11:33,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742431_1607 (size=121) 2024-11-23T17:11:33,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742431_1607 (size=121) 2024-11-23T17:11:33,829 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1732381891870.11652adc4e2c2498a5b29c501cdb4e49. 
2024-11-23T17:11:33,829 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/1a7c5470b472:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=247 2024-11-23T17:11:33,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster(4169): Remote procedure done, pid=247 2024-11-23T17:11:33,830 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 11652adc4e2c2498a5b29c501cdb4e49 2024-11-23T17:11:33,830 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=247, ppid=245, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 11652adc4e2c2498a5b29c501cdb4e49 2024-11-23T17:11:33,833 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=247, resume processing ppid=245 2024-11-23T17:11:33,833 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=247, ppid=245, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 11652adc4e2c2498a5b29c501cdb4e49 in 389 msec 2024-11-23T17:11:33,833 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:11:33,834 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:11:33,835 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
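Both region subprocedures (pid=246 and pid=247) have finished above, and the parent procedure now moves through SNAPSHOT_SNAPSHOT_SPLIT_REGIONS into SNAPSHOT_SNAPSHOT_MOB_REGION: the flushes earlier went through DefaultMobStoreFlusher and wrote MOB files under mobdir/, so the snapshot must reference those as well. As a hedged illustration of why that step exists, this is roughly how a MOB-enabled column family is declared when such a table is created; the threshold value and class/method names in the sketch are assumptions, not the test's actual DDL.

    // Illustrative sketch only (threshold and names are assumptions): a MOB-enabled
    // column family is what routes flushes through DefaultMobStoreFlusher and adds
    // the separate MOB-region step to the snapshot procedure.
    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobTableSketch {
      static void createMobTable(Admin admin) throws IOException {
        TableDescriptorBuilder table = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"));
        table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)   // large cells are written as MOB files under mobdir/
            .setMobThreshold(0L)   // value-size threshold; 0 here is purely illustrative
            .build());
        admin.createTable(table.build());
      }
    }

With MOB enabled, each flush writes the full value into a MOB file and only a small reference cell into the regular store file, which is why the manifest below adds references for the mobdir hfiles in addition to the per-region hfiles already captured.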
2024-11-23T17:11:33,835 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-23T17:11:33,835 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:11:33,840 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b2024112364ab05fe9dcd4ac0a7d6ea02f636c43e_11652adc4e2c2498a5b29c501cdb4e49, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e202411237248a953eb3a4c9083d01724a50f724b_77e5cce4b78d7dce9f55ede2c0fab96a] hfiles 2024-11-23T17:11:33,840 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b2024112364ab05fe9dcd4ac0a7d6ea02f636c43e_11652adc4e2c2498a5b29c501cdb4e49 2024-11-23T17:11:33,840 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e202411237248a953eb3a4c9083d01724a50f724b_77e5cce4b78d7dce9f55ede2c0fab96a 2024-11-23T17:11:33,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742432_1608 (size=305) 2024-11-23T17:11:33,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742432_1608 (size=305) 2024-11-23T17:11:33,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742432_1608 (size=305) 2024-11-23T17:11:33,852 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:11:33,852 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:33,852 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:33,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742433_1609 (size=1007) 2024-11-23T17:11:33,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added 
to blk_1073742433_1609 (size=1007) 2024-11-23T17:11:33,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742433_1609 (size=1007) 2024-11-23T17:11:33,871 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:11:33,876 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:11:33,878 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:33,879 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:11:33,879 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 245 2024-11-23T17:11:33,881 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=245, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 494 msec 2024-11-23T17:11:34,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-11-23T17:11:34,018 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-23T17:11:34,018 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381894018 2024-11-23T17:11:34,018 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:35431, tgtDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381894018, rawTgtDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381894018, srcFsUri=hdfs://localhost:35431, srcDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:11:34,052 DEBUG [Time-limited test {}] 
snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:35431, inputRoot=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33 2024-11-23T17:11:34,052 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1293158403_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381894018, skipTmp=true, initialOutputSnapshotDir=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381894018/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:34,054 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-23T17:11:34,067 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381894018/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:34,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742435_1611 (size=198) 2024-11-23T17:11:34,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742435_1611 (size=198) 2024-11-23T17:11:34,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742435_1611 (size=198) 2024-11-23T17:11:34,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742434_1610 (size=1007) 2024-11-23T17:11:34,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742434_1610 (size=1007) 2024-11-23T17:11:34,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742434_1610 (size=1007) 2024-11-23T17:11:34,340 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:34,340 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-11-23T17:11:34,341 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-11-23T17:11:34,543 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:11:34,543 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:11:34,543 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:11:35,624 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop-10600513838518563111.jar 2024-11-23T17:11:35,624 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:11:35,624 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:11:35,691 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop-10755468543451277537.jar 2024-11-23T17:11:35,691 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:11:35,691 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:11:35,691 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:11:35,692 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:11:35,692 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:11:35,692 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-11-23T17:11:35,692 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-23T17:11:35,692 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-23T17:11:35,692 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-23T17:11:35,693 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-23T17:11:35,693 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-23T17:11:35,693 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-23T17:11:35,693 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-23T17:11:35,693 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-23T17:11:35,693 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-23T17:11:35,694 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-23T17:11:35,694 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-23T17:11:35,694 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:11:35,694 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:11:35,694 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:11:35,695 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:11:35,695 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:11:35,695 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:11:35,695 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:11:35,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742436_1612 (size=24020) 2024-11-23T17:11:35,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742436_1612 (size=24020) 2024-11-23T17:11:35,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742436_1612 (size=24020) 2024-11-23T17:11:35,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742437_1613 (size=77755) 2024-11-23T17:11:35,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742437_1613 (size=77755) 2024-11-23T17:11:35,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742437_1613 (size=77755) 2024-11-23T17:11:35,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742438_1614 (size=131360) 2024-11-23T17:11:35,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742438_1614 (size=131360) 2024-11-23T17:11:35,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is 
added to blk_1073742438_1614 (size=131360) 2024-11-23T17:11:35,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742439_1615 (size=111793) 2024-11-23T17:11:35,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742439_1615 (size=111793) 2024-11-23T17:11:35,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742439_1615 (size=111793) 2024-11-23T17:11:35,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742440_1616 (size=1832290) 2024-11-23T17:11:35,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742440_1616 (size=1832290) 2024-11-23T17:11:35,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742440_1616 (size=1832290) 2024-11-23T17:11:35,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742441_1617 (size=8360005) 2024-11-23T17:11:35,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742441_1617 (size=8360005) 2024-11-23T17:11:35,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742441_1617 (size=8360005) 2024-11-23T17:11:35,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742442_1618 (size=503880) 2024-11-23T17:11:35,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742442_1618 (size=503880) 2024-11-23T17:11:35,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742442_1618 (size=503880) 2024-11-23T17:11:35,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742443_1619 (size=322274) 2024-11-23T17:11:35,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742443_1619 (size=322274) 2024-11-23T17:11:35,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742443_1619 (size=322274) 2024-11-23T17:11:35,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742444_1620 (size=20406) 2024-11-23T17:11:35,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742444_1620 (size=20406) 2024-11-23T17:11:35,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742444_1620 (size=20406) 2024-11-23T17:11:35,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742445_1621 (size=45609) 2024-11-23T17:11:35,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:44705 is added to blk_1073742445_1621 (size=45609) 2024-11-23T17:11:35,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742445_1621 (size=45609) 2024-11-23T17:11:35,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742446_1622 (size=136454) 2024-11-23T17:11:35,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742446_1622 (size=136454) 2024-11-23T17:11:35,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742446_1622 (size=136454) 2024-11-23T17:11:35,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742447_1623 (size=1597270) 2024-11-23T17:11:35,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742447_1623 (size=1597270) 2024-11-23T17:11:35,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742447_1623 (size=1597270) 2024-11-23T17:11:35,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742448_1624 (size=30873) 2024-11-23T17:11:35,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742448_1624 (size=30873) 2024-11-23T17:11:35,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742448_1624 (size=30873) 2024-11-23T17:11:35,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742449_1625 (size=29229) 2024-11-23T17:11:35,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742449_1625 (size=29229) 2024-11-23T17:11:35,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742449_1625 (size=29229) 2024-11-23T17:11:35,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742450_1626 (size=903661) 2024-11-23T17:11:35,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742450_1626 (size=903661) 2024-11-23T17:11:35,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742450_1626 (size=903661) 2024-11-23T17:11:35,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742451_1627 (size=440956) 2024-11-23T17:11:35,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742451_1627 (size=440956) 2024-11-23T17:11:35,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742451_1627 (size=440956) 2024-11-23T17:11:35,905 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for 
appattempt_1732381541272_0010_000001 (auth:SIMPLE) from 127.0.0.1:52242 2024-11-23T17:11:35,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742452_1628 (size=6424742) 2024-11-23T17:11:35,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742452_1628 (size=6424742) 2024-11-23T17:11:35,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742452_1628 (size=6424742) 2024-11-23T17:11:35,914 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_2/usercache/jenkins/appcache/application_1732381541272_0010/container_1732381541272_0010_01_000001/launch_container.sh] 2024-11-23T17:11:35,914 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_2/usercache/jenkins/appcache/application_1732381541272_0010/container_1732381541272_0010_01_000001/container_tokens] 2024-11-23T17:11:35,914 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_2/usercache/jenkins/appcache/application_1732381541272_0010/container_1732381541272_0010_01_000001/sysfs] 2024-11-23T17:11:35,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742453_1629 (size=5175431) 2024-11-23T17:11:35,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742453_1629 (size=5175431) 2024-11-23T17:11:35,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742453_1629 (size=5175431) 2024-11-23T17:11:35,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742454_1630 (size=232881) 2024-11-23T17:11:35,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742454_1630 (size=232881) 2024-11-23T17:11:35,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742454_1630 (size=232881) 2024-11-23T17:11:35,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742455_1631 (size=1323991) 2024-11-23T17:11:35,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742455_1631 (size=1323991) 2024-11-23T17:11:35,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742455_1631 (size=1323991) 2024-11-23T17:11:35,960 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742456_1632 (size=4695811) 2024-11-23T17:11:35,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742456_1632 (size=4695811) 2024-11-23T17:11:35,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742456_1632 (size=4695811) 2024-11-23T17:11:35,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742457_1633 (size=1877034) 2024-11-23T17:11:35,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742457_1633 (size=1877034) 2024-11-23T17:11:35,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742457_1633 (size=1877034) 2024-11-23T17:11:35,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742458_1634 (size=217555) 2024-11-23T17:11:35,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742458_1634 (size=217555) 2024-11-23T17:11:35,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742458_1634 (size=217555) 2024-11-23T17:11:35,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742459_1635 (size=4188619) 2024-11-23T17:11:35,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742459_1635 (size=4188619) 2024-11-23T17:11:35,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742459_1635 (size=4188619) 2024-11-23T17:11:35,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742460_1636 (size=127628) 2024-11-23T17:11:35,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742460_1636 (size=127628) 2024-11-23T17:11:35,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742460_1636 (size=127628) 2024-11-23T17:11:35,998 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-11-23T17:11:36,000 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list 2024-11-23T17:11:36,001 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=15.2 K 2024-11-23T17:11:36,001 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-11-23T17:11:36,001 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.8 K 2024-11-23T17:11:36,001 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.0 K 2024-11-23T17:11:36,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742461_1637 (size=1079) 2024-11-23T17:11:36,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742461_1637 (size=1079) 2024-11-23T17:11:36,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742461_1637 (size=1079) 2024-11-23T17:11:36,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742462_1638 (size=35) 2024-11-23T17:11:36,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742462_1638 (size=35) 2024-11-23T17:11:36,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742462_1638 (size=35) 2024-11-23T17:11:36,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742463_1639 (size=304251) 2024-11-23T17:11:36,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742463_1639 (size=304251) 2024-11-23T17:11:36,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742463_1639 (size=304251) 2024-11-23T17:11:36,033 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T17:11:36,033 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-23T17:11:36,689 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0011_000001 (auth:SIMPLE) from 127.0.0.1:54796 2024-11-23T17:11:37,051 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:11:37,575 DEBUG [master/1a7c5470b472:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 5c659a536d4a93accf1b401f39f6f967 changed from -1.0 to 0.0, refreshing cache 2024-11-23T17:11:37,575 DEBUG [master/1a7c5470b472:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 11652adc4e2c2498a5b29c501cdb4e49 changed from -1.0 to 0.0, refreshing cache 2024-11-23T17:11:37,575 DEBUG [master/1a7c5470b472:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-23T17:11:37,575 DEBUG [master/1a7c5470b472:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 77e5cce4b78d7dce9f55ede2c0fab96a changed from -1.0 to 0.0, refreshing cache 2024-11-23T17:11:41,195 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0011_000001 (auth:SIMPLE) from 127.0.0.1:49656 2024-11-23T17:11:41,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742464_1640 (size=349973) 2024-11-23T17:11:41,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742464_1640 (size=349973) 2024-11-23T17:11:41,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742464_1640 (size=349973) 2024-11-23T17:11:43,413 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0011_000001 (auth:SIMPLE) from 127.0.0.1:40740 2024-11-23T17:11:43,413 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0011_000001 (auth:SIMPLE) from 127.0.0.1:50404 2024-11-23T17:11:44,277 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0011_000001 (auth:SIMPLE) from 127.0.0.1:50406 2024-11-23T17:11:44,277 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0011_000001 (auth:SIMPLE) from 127.0.0.1:40754 2024-11-23T17:11:46,901 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732381541272_0011_01_000006 while processing FINISH_CONTAINERS event 2024-11-23T17:11:48,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742465_1641 (size=15525) 2024-11-23T17:11:48,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742465_1641 (size=15525) 2024-11-23T17:11:48,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742465_1641 (size=15525) 2024-11-23T17:11:49,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added 
to blk_1073742467_1643 (size=8171) 2024-11-23T17:11:49,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742467_1643 (size=8171) 2024-11-23T17:11:49,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742467_1643 (size=8171) 2024-11-23T17:11:49,243 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_0/usercache/jenkins/appcache/application_1732381541272_0011/container_1732381541272_0011_01_000003/launch_container.sh] 2024-11-23T17:11:49,243 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_0/usercache/jenkins/appcache/application_1732381541272_0011/container_1732381541272_0011_01_000003/container_tokens] 2024-11-23T17:11:49,243 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_0/usercache/jenkins/appcache/application_1732381541272_0011/container_1732381541272_0011_01_000003/sysfs] 2024-11-23T17:11:49,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742468_1644 (size=5962) 2024-11-23T17:11:49,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742468_1644 (size=5962) 2024-11-23T17:11:49,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742468_1644 (size=5962) 2024-11-23T17:11:49,769 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_1/usercache/jenkins/appcache/application_1732381541272_0011/container_1732381541272_0011_01_000004/launch_container.sh] 2024-11-23T17:11:49,769 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_1/usercache/jenkins/appcache/application_1732381541272_0011/container_1732381541272_0011_01_000004/container_tokens] 2024-11-23T17:11:49,769 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_1/usercache/jenkins/appcache/application_1732381541272_0011/container_1732381541272_0011_01_000004/sysfs] 2024-11-23T17:11:49,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:44705 is added to blk_1073742469_1645 (size=5102) 2024-11-23T17:11:49,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742469_1645 (size=5102) 2024-11-23T17:11:49,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742469_1645 (size=5102) 2024-11-23T17:11:49,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742466_1642 (size=31801) 2024-11-23T17:11:49,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742466_1642 (size=31801) 2024-11-23T17:11:49,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742466_1642 (size=31801) 2024-11-23T17:11:49,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742470_1646 (size=477) 2024-11-23T17:11:49,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742470_1646 (size=477) 2024-11-23T17:11:49,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742470_1646 (size=477) 2024-11-23T17:11:49,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742471_1647 (size=31801) 2024-11-23T17:11:49,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742471_1647 (size=31801) 2024-11-23T17:11:49,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742471_1647 (size=31801) 2024-11-23T17:11:49,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742472_1648 (size=349973) 2024-11-23T17:11:49,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742472_1648 (size=349973) 2024-11-23T17:11:49,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742472_1648 (size=349973) 2024-11-23T17:11:49,908 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_2/usercache/jenkins/appcache/application_1732381541272_0011/container_1732381541272_0011_01_000005/launch_container.sh] 2024-11-23T17:11:49,908 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_2/usercache/jenkins/appcache/application_1732381541272_0011/container_1732381541272_0011_01_000005/container_tokens] 2024-11-23T17:11:49,908 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-1_2/usercache/jenkins/appcache/application_1732381541272_0011/container_1732381541272_0011_01_000005/sysfs] 2024-11-23T17:11:49,919 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0011_000001 (auth:SIMPLE) from 127.0.0.1:40760 2024-11-23T17:11:49,924 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0011_000001 (auth:SIMPLE) from 127.0.0.1:50422 2024-11-23T17:11:51,157 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-23T17:11:51,157 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-11-23T17:11:51,163 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:51,163 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-23T17:11:51,164 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-23T17:11:51,164 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1293158403_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:51,164 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-11-23T17:11:51,164 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-11-23T17:11:51,164 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1293158403_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381894018/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381894018/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:51,164 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381894018/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-11-23T17:11:51,164 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/export-test/export-1732381894018/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-11-23T17:11:51,169 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable 
testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:51,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=248, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:51,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=248 2024-11-23T17:11:51,172 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381911172"}]},"ts":"1732381911172"} 2024-11-23T17:11:51,174 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta 2024-11-23T17:11:51,174 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-11-23T17:11:51,174 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=249, ppid=248, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-11-23T17:11:51,176 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=250, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=77e5cce4b78d7dce9f55ede2c0fab96a, UNASSIGN}, {pid=251, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=11652adc4e2c2498a5b29c501cdb4e49, UNASSIGN}] 2024-11-23T17:11:51,176 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=251, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=11652adc4e2c2498a5b29c501cdb4e49, UNASSIGN 2024-11-23T17:11:51,176 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=250, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=77e5cce4b78d7dce9f55ede2c0fab96a, UNASSIGN 2024-11-23T17:11:51,177 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=250 updating hbase:meta row=77e5cce4b78d7dce9f55ede2c0fab96a, regionState=CLOSING, regionLocation=1a7c5470b472,44711,1732381534872 2024-11-23T17:11:51,177 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=251 updating hbase:meta row=11652adc4e2c2498a5b29c501cdb4e49, regionState=CLOSING, regionLocation=1a7c5470b472,43453,1732381534825 2024-11-23T17:11:51,179 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=251, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=11652adc4e2c2498a5b29c501cdb4e49, UNASSIGN because future has completed 2024-11-23T17:11:51,179 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:11:51,179 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=252, ppid=251, state=RUNNABLE, hasLock=false; CloseRegionProcedure 11652adc4e2c2498a5b29c501cdb4e49, server=1a7c5470b472,43453,1732381534825}] 2024-11-23T17:11:51,179 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=250, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=77e5cce4b78d7dce9f55ede2c0fab96a, UNASSIGN because future has completed 2024-11-23T17:11:51,179 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:11:51,179 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=253, ppid=250, state=RUNNABLE, hasLock=false; CloseRegionProcedure 77e5cce4b78d7dce9f55ede2c0fab96a, server=1a7c5470b472,44711,1732381534872}] 2024-11-23T17:11:51,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=248 2024-11-23T17:11:51,331 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] handler.UnassignRegionHandler(122): Close 11652adc4e2c2498a5b29c501cdb4e49 2024-11-23T17:11:51,331 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:11:51,331 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1722): Closing 11652adc4e2c2498a5b29c501cdb4e49, disabling compactions & flushes 2024-11-23T17:11:51,331 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1732381891870.11652adc4e2c2498a5b29c501cdb4e49. 2024-11-23T17:11:51,331 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1732381891870.11652adc4e2c2498a5b29c501cdb4e49. 2024-11-23T17:11:51,331 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1732381891870.11652adc4e2c2498a5b29c501cdb4e49. after waiting 0 ms 2024-11-23T17:11:51,331 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1732381891870.11652adc4e2c2498a5b29c501cdb4e49. 
2024-11-23T17:11:51,332 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] handler.UnassignRegionHandler(122): Close 77e5cce4b78d7dce9f55ede2c0fab96a 2024-11-23T17:11:51,332 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:11:51,332 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1722): Closing 77e5cce4b78d7dce9f55ede2c0fab96a, disabling compactions & flushes 2024-11-23T17:11:51,332 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1732381891870.77e5cce4b78d7dce9f55ede2c0fab96a. 2024-11-23T17:11:51,332 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1732381891870.77e5cce4b78d7dce9f55ede2c0fab96a. 2024-11-23T17:11:51,332 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1732381891870.77e5cce4b78d7dce9f55ede2c0fab96a. after waiting 0 ms 2024-11-23T17:11:51,332 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1732381891870.77e5cce4b78d7dce9f55ede2c0fab96a. 2024-11-23T17:11:51,335 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/11652adc4e2c2498a5b29c501cdb4e49/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:11:51,336 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/77e5cce4b78d7dce9f55ede2c0fab96a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:11:51,336 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:11:51,336 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:11:51,336 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1732381891870.11652adc4e2c2498a5b29c501cdb4e49. 2024-11-23T17:11:51,336 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1732381891870.77e5cce4b78d7dce9f55ede2c0fab96a. 
2024-11-23T17:11:51,336 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1676): Region close journal for 11652adc4e2c2498a5b29c501cdb4e49: Waiting for close lock at 1732381911331Running coprocessor pre-close hooks at 1732381911331Disabling compacts and flushes for region at 1732381911331Disabling writes for close at 1732381911331Writing region close event to WAL at 1732381911332 (+1 ms)Running coprocessor post-close hooks at 1732381911336 (+4 ms)Closed at 1732381911336 2024-11-23T17:11:51,336 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1676): Region close journal for 77e5cce4b78d7dce9f55ede2c0fab96a: Waiting for close lock at 1732381911332Running coprocessor pre-close hooks at 1732381911332Disabling compacts and flushes for region at 1732381911332Disabling writes for close at 1732381911332Writing region close event to WAL at 1732381911333 (+1 ms)Running coprocessor post-close hooks at 1732381911336 (+3 ms)Closed at 1732381911336 2024-11-23T17:11:51,337 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] handler.UnassignRegionHandler(157): Closed 11652adc4e2c2498a5b29c501cdb4e49 2024-11-23T17:11:51,338 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=251 updating hbase:meta row=11652adc4e2c2498a5b29c501cdb4e49, regionState=CLOSED 2024-11-23T17:11:51,338 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] handler.UnassignRegionHandler(157): Closed 77e5cce4b78d7dce9f55ede2c0fab96a 2024-11-23T17:11:51,339 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=250 updating hbase:meta row=77e5cce4b78d7dce9f55ede2c0fab96a, regionState=CLOSED 2024-11-23T17:11:51,339 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=252, ppid=251, state=RUNNABLE, hasLock=false; CloseRegionProcedure 11652adc4e2c2498a5b29c501cdb4e49, server=1a7c5470b472,43453,1732381534825 because future has completed 2024-11-23T17:11:51,340 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=253, ppid=250, state=RUNNABLE, hasLock=false; CloseRegionProcedure 77e5cce4b78d7dce9f55ede2c0fab96a, server=1a7c5470b472,44711,1732381534872 because future has completed 2024-11-23T17:11:51,342 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=252, resume processing ppid=251 2024-11-23T17:11:51,342 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=252, ppid=251, state=SUCCESS, hasLock=false; CloseRegionProcedure 11652adc4e2c2498a5b29c501cdb4e49, server=1a7c5470b472,43453,1732381534825 in 161 msec 2024-11-23T17:11:51,343 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=253, resume processing ppid=250 2024-11-23T17:11:51,343 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=253, ppid=250, state=SUCCESS, hasLock=false; CloseRegionProcedure 77e5cce4b78d7dce9f55ede2c0fab96a, server=1a7c5470b472,44711,1732381534872 in 162 msec 2024-11-23T17:11:51,343 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=251, ppid=249, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=11652adc4e2c2498a5b29c501cdb4e49, UNASSIGN in 166 msec 2024-11-23T17:11:51,344 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=250, resume processing ppid=249 2024-11-23T17:11:51,344 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=250, ppid=249, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=77e5cce4b78d7dce9f55ede2c0fab96a, UNASSIGN in 167 msec 2024-11-23T17:11:51,346 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=249, resume processing ppid=248 2024-11-23T17:11:51,346 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=249, ppid=248, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 171 msec 2024-11-23T17:11:51,347 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732381911347"}]},"ts":"1732381911347"} 2024-11-23T17:11:51,348 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-11-23T17:11:51,348 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-11-23T17:11:51,350 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=248, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 179 msec 2024-11-23T17:11:51,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=248 2024-11-23T17:11:51,488 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-23T17:11:51,488 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:51,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] procedure2.ProcedureExecutor(1139): Stored pid=254, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:51,490 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=254, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:51,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:51,490 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=254, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:51,493 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37875 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:51,494 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/77e5cce4b78d7dce9f55ede2c0fab96a 2024-11-23T17:11:51,494 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/11652adc4e2c2498a5b29c501cdb4e49 2024-11-23T17:11:51,496 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/77e5cce4b78d7dce9f55ede2c0fab96a/cf, FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/77e5cce4b78d7dce9f55ede2c0fab96a/recovered.edits] 2024-11-23T17:11:51,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:51,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:51,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:51,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:51,496 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/11652adc4e2c2498a5b29c501cdb4e49/cf, FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/11652adc4e2c2498a5b29c501cdb4e49/recovered.edits] 2024-11-23T17:11:51,497 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-23T17:11:51,497 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-23T17:11:51,497 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-23T17:11:51,497 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-23T17:11:51,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:51,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:51,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:11:51,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:51,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:51,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:11:51,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:11:51,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:11:51,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=254 2024-11-23T17:11:51,499 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:11:51,499 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:11:51,499 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:11:51,500 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:11:51,500 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/77e5cce4b78d7dce9f55ede2c0fab96a/cf/20a8139982f540078d5c340067baeaad to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/77e5cce4b78d7dce9f55ede2c0fab96a/cf/20a8139982f540078d5c340067baeaad 2024-11-23T17:11:51,500 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/11652adc4e2c2498a5b29c501cdb4e49/cf/fcd82d2a8e844139996b3fe5ff7a8489 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/11652adc4e2c2498a5b29c501cdb4e49/cf/fcd82d2a8e844139996b3fe5ff7a8489 2024-11-23T17:11:51,502 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/77e5cce4b78d7dce9f55ede2c0fab96a/recovered.edits/9.seqid to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/77e5cce4b78d7dce9f55ede2c0fab96a/recovered.edits/9.seqid 2024-11-23T17:11:51,502 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/11652adc4e2c2498a5b29c501cdb4e49/recovered.edits/9.seqid to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/11652adc4e2c2498a5b29c501cdb4e49/recovered.edits/9.seqid 2024-11-23T17:11:51,503 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/77e5cce4b78d7dce9f55ede2c0fab96a 2024-11-23T17:11:51,503 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testtb-testExportFileSystemStateWithSkipTmp/11652adc4e2c2498a5b29c501cdb4e49 2024-11-23T17:11:51,503 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-11-23T17:11:51,503 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a 2024-11-23T17:11:51,504 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf] 2024-11-23T17:11:51,506 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b2024112364ab05fe9dcd4ac0a7d6ea02f636c43e_11652adc4e2c2498a5b29c501cdb4e49 to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b2024112364ab05fe9dcd4ac0a7d6ea02f636c43e_11652adc4e2c2498a5b29c501cdb4e49 2024-11-23T17:11:51,507 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e202411237248a953eb3a4c9083d01724a50f724b_77e5cce4b78d7dce9f55ede2c0fab96a to hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e202411237248a953eb3a4c9083d01724a50f724b_77e5cce4b78d7dce9f55ede2c0fab96a 2024-11-23T17:11:51,507 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a 2024-11-23T17:11:51,508 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=254, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:51,511 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-11-23T17:11:51,512 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 2024-11-23T17:11:51,513 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=254, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:51,513 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 
2024-11-23T17:11:51,513 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1732381891870.77e5cce4b78d7dce9f55ede2c0fab96a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732381911513"}]},"ts":"9223372036854775807"} 2024-11-23T17:11:51,514 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1732381891870.11652adc4e2c2498a5b29c501cdb4e49.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732381911513"}]},"ts":"9223372036854775807"} 2024-11-23T17:11:51,515 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-23T17:11:51,515 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 77e5cce4b78d7dce9f55ede2c0fab96a, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1732381891870.77e5cce4b78d7dce9f55ede2c0fab96a.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 11652adc4e2c2498a5b29c501cdb4e49, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1732381891870.11652adc4e2c2498a5b29c501cdb4e49.', STARTKEY => '1', ENDKEY => ''}] 2024-11-23T17:11:51,515 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 2024-11-23T17:11:51,515 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732381911515"}]},"ts":"9223372036854775807"} 2024-11-23T17:11:51,516 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-11-23T17:11:51,517 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=254, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:51,518 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=254, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 29 msec 2024-11-23T17:11:51,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=254 2024-11-23T17:11:51,608 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:51,608 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-23T17:11:51,613 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-11-23T17:11:51,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:51,615 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-11-23T17:11:51,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159 {}] snapshot.SnapshotManager(381): Deleting 
snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:51,636 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=839 (was 830) Potentially hanging thread: ApplicationMasterLauncher #20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 3318) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1308457665_1 at /127.0.0.1:46790 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1293158403_22 at /127.0.0.1:42532 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-9972 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1293158403_22 at /127.0.0.1:40882 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:33853 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33853 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-27 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-26 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1293158403_22 at /127.0.0.1:44452 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=830 (was 843), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=830 (was 854), ProcessCount=19 (was 26), AvailableMemoryMB=2289 (was 1921) - AvailableMemoryMB LEAK? - 2024-11-23T17:11:51,636 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=839 is superior to 500 2024-11-23T17:11:51,636 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2377): Stopping mini mapreduce cluster... 2024-11-23T17:11:51,644 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@56ef7273{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-23T17:11:51,646 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6075dd8f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T17:11:51,646 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T17:11:51,647 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35fb350b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-23T17:11:51,647 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e5d7b2d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop.log.dir/,STOPPED} 2024-11-23T17:11:53,642 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_3/usercache/jenkins/appcache/application_1732381541272_0011/container_1732381541272_0011_01_000002/launch_container.sh] 2024-11-23T17:11:53,642 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_3/usercache/jenkins/appcache/application_1732381541272_0011/container_1732381541272_0011_01_000002/container_tokens] 2024-11-23T17:11:53,642 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_3/usercache/jenkins/appcache/application_1732381541272_0011/container_1732381541272_0011_01_000002/sysfs] 2024-11-23T17:11:54,340 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:11:56,000 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732381541272_0011_000001 (auth:SIMPLE) from 127.0.0.1:34062 2024-11-23T17:11:56,012 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_0/usercache/jenkins/appcache/application_1732381541272_0011/container_1732381541272_0011_01_000001/launch_container.sh] 2024-11-23T17:11:56,012 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_0/usercache/jenkins/appcache/application_1732381541272_0011/container_1732381541272_0011_01_000001/container_tokens] 2024-11-23T17:11:56,012 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_429975666/yarn-5465858302/MiniMRCluster_429975666-localDir-nm-0_0/usercache/jenkins/appcache/application_1732381541272_0011/container_1732381541272_0011_01_000001/sysfs] 2024-11-23T17:11:56,869 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:12:03,180 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-23T17:12:08,486 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 42720 2024-11-23T17:12:08,659 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@49461f55{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-23T17:12:08,659 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@41773a70{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T17:12:08,659 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T17:12:08,660 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2cd7c925{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-23T17:12:08,660 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ec8b503{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop.log.dir/,STOPPED} 2024-11-23T17:12:09,232 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 5c659a536d4a93accf1b401f39f6f967, had cached 0 bytes from a total of 14663 2024-11-23T17:12:17,337 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region d701f2f82212e8f3b780ec9dae20e678, had cached 0 bytes from a total of 5888 2024-11-23T17:12:25,667 ERROR [Thread[Thread-404,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-23T17:12:25,667 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4ad0a267{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-11-23T17:12:25,668 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@79846073{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T17:12:25,668 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T17:12:25,668 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3bd9ebb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-23T17:12:25,668 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4543a73d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop.log.dir/,STOPPED} 2024-11-23T17:12:25,672 WARN [ApplicationMaster Launcher {}] amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 
2024-11-23T17:12:25,676 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-11-23T17:12:25,677 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-11-23T17:12:25,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741830_1006 (size=1184163) 2024-11-23T17:12:25,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741830_1006 (size=1184163) 2024-11-23T17:12:25,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741830_1006 (size=1184163) 2024-11-23T17:12:25,684 ERROR [Thread[Thread-427,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-23T17:12:25,686 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@359f3c9c{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-11-23T17:12:25,687 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@49b458b8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T17:12:25,687 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T17:12:25,687 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@43bd3a04{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-23T17:12:25,687 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1300e646{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop.log.dir/,STOPPED} 2024-11-23T17:12:25,689 ERROR [Thread[Thread-386,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-23T17:12:25,689 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2380): Mini mapreduce cluster stopped 2024-11-23T17:12:25,689 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-23T17:12:25,689 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-23T17:12:25,689 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T17:12:25,689 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:12:25,689 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:12:25,689 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T17:12:25,690 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-23T17:12:25,690 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2066838995, stopped=false 2024-11-23T17:12:25,690 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:12:25,690 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-23T17:12:25,690 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=1a7c5470b472,46159,1732381533870 2024-11-23T17:12:25,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T17:12:25,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T17:12:25,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T17:12:25,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T17:12:25,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:12:25,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:12:25,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:12:25,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:12:25,692 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T17:12:25,693 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-23T17:12:25,693 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T17:12:25,693 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T17:12:25,693 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T17:12:25,693 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T17:12:25,693 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T17:12:25,693 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:12:25,694 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 
'1a7c5470b472,37875,1732381534675' ***** 2024-11-23T17:12:25,694 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:12:25,694 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-23T17:12:25,694 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '1a7c5470b472,43453,1732381534825' ***** 2024-11-23T17:12:25,694 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:12:25,694 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-23T17:12:25,694 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '1a7c5470b472,44711,1732381534872' ***** 2024-11-23T17:12:25,694 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:12:25,694 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-23T17:12:25,695 INFO [RS:0;1a7c5470b472:37875 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-23T17:12:25,695 INFO [RS:2;1a7c5470b472:44711 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-23T17:12:25,695 INFO [RS:1;1a7c5470b472:43453 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-23T17:12:25,695 INFO [RS:0;1a7c5470b472:37875 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-23T17:12:25,695 INFO [RS:1;1a7c5470b472:43453 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-23T17:12:25,695 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-23T17:12:25,695 INFO [RS:2;1a7c5470b472:44711 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-23T17:12:25,695 INFO [RS:0;1a7c5470b472:37875 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-23T17:12:25,695 INFO [RS:1;1a7c5470b472:43453 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-23T17:12:25,695 INFO [RS:2;1a7c5470b472:44711 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-23T17:12:25,695 INFO [RS:0;1a7c5470b472:37875 {}] regionserver.HRegionServer(3091): Received CLOSE for 64dcd7fd9fc755d9ffeb5ce90b3151ce 2024-11-23T17:12:25,695 INFO [RS:1;1a7c5470b472:43453 {}] regionserver.HRegionServer(3091): Received CLOSE for 5c659a536d4a93accf1b401f39f6f967 2024-11-23T17:12:25,695 INFO [RS:2;1a7c5470b472:44711 {}] regionserver.HRegionServer(3091): Received CLOSE for d701f2f82212e8f3b780ec9dae20e678 2024-11-23T17:12:25,695 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-23T17:12:25,695 INFO [RS:0;1a7c5470b472:37875 {}] regionserver.HRegionServer(959): stopping server 1a7c5470b472,37875,1732381534675 2024-11-23T17:12:25,695 INFO [RS:1;1a7c5470b472:43453 {}] regionserver.HRegionServer(959): stopping server 1a7c5470b472,43453,1732381534825 2024-11-23T17:12:25,695 INFO [RS:0;1a7c5470b472:37875 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T17:12:25,695 INFO [RS:1;1a7c5470b472:43453 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T17:12:25,696 INFO [RS:0;1a7c5470b472:37875 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;1a7c5470b472:37875. 2024-11-23T17:12:25,696 INFO [RS:1;1a7c5470b472:43453 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;1a7c5470b472:43453. 2024-11-23T17:12:25,696 DEBUG [RS:0;1a7c5470b472:37875 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T17:12:25,696 DEBUG [RS:0;1a7c5470b472:37875 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:12:25,696 DEBUG [RS:1;1a7c5470b472:43453 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at 
org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T17:12:25,696 DEBUG [RS:1;1a7c5470b472:43453 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:12:25,696 INFO [RS:0;1a7c5470b472:37875 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-23T17:12:25,696 DEBUG [RS:0;1a7c5470b472:37875 {}] regionserver.HRegionServer(1325): Online Regions={64dcd7fd9fc755d9ffeb5ce90b3151ce=hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce.} 2024-11-23T17:12:25,696 INFO [RS:1;1a7c5470b472:43453 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-23T17:12:25,696 DEBUG [RS:1;1a7c5470b472:43453 {}] regionserver.HRegionServer(1325): Online Regions={5c659a536d4a93accf1b401f39f6f967=testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967.} 2024-11-23T17:12:25,696 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 5c659a536d4a93accf1b401f39f6f967, disabling compactions & flushes 2024-11-23T17:12:25,696 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 64dcd7fd9fc755d9ffeb5ce90b3151ce, disabling compactions & flushes 2024-11-23T17:12:25,696 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing d701f2f82212e8f3b780ec9dae20e678, disabling compactions & flushes 2024-11-23T17:12:25,696 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967. 2024-11-23T17:12:25,696 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce. 2024-11-23T17:12:25,696 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1732381801999.d701f2f82212e8f3b780ec9dae20e678. 2024-11-23T17:12:25,696 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967. 2024-11-23T17:12:25,696 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce. 2024-11-23T17:12:25,696 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1732381801999.d701f2f82212e8f3b780ec9dae20e678. 
2024-11-23T17:12:25,696 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967. after waiting 0 ms 2024-11-23T17:12:25,696 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce. after waiting 0 ms 2024-11-23T17:12:25,696 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967. 2024-11-23T17:12:25,696 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce. 2024-11-23T17:12:25,696 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1732381801999.d701f2f82212e8f3b780ec9dae20e678. after waiting 0 ms 2024-11-23T17:12:25,696 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1732381801999.d701f2f82212e8f3b780ec9dae20e678. 2024-11-23T17:12:25,696 DEBUG [RS:0;1a7c5470b472:37875 {}] regionserver.HRegionServer(1351): Waiting on 64dcd7fd9fc755d9ffeb5ce90b3151ce 2024-11-23T17:12:25,696 DEBUG [RS:1;1a7c5470b472:43453 {}] regionserver.HRegionServer(1351): Waiting on 5c659a536d4a93accf1b401f39f6f967 2024-11-23T17:12:25,696 INFO [RS:2;1a7c5470b472:44711 {}] regionserver.HRegionServer(959): stopping server 1a7c5470b472,44711,1732381534872 2024-11-23T17:12:25,696 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-23T17:12:25,696 INFO [RS:2;1a7c5470b472:44711 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T17:12:25,696 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 64dcd7fd9fc755d9ffeb5ce90b3151ce 1/1 column families, dataSize=1.65 KB heapSize=3.90 KB 2024-11-23T17:12:25,696 INFO [RS:2;1a7c5470b472:44711 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;1a7c5470b472:44711. 
2024-11-23T17:12:25,696 DEBUG [RS:2;1a7c5470b472:44711 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T17:12:25,696 DEBUG [RS:2;1a7c5470b472:44711 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:12:25,697 INFO [RS:2;1a7c5470b472:44711 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-23T17:12:25,697 INFO [RS:2;1a7c5470b472:44711 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-23T17:12:25,697 INFO [RS:2;1a7c5470b472:44711 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-23T17:12:25,697 INFO [RS:2;1a7c5470b472:44711 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-23T17:12:25,697 INFO [RS:2;1a7c5470b472:44711 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-23T17:12:25,697 DEBUG [RS:2;1a7c5470b472:44711 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, d701f2f82212e8f3b780ec9dae20e678=testExportExpiredSnapshot,,1732381801999.d701f2f82212e8f3b780ec9dae20e678.} 2024-11-23T17:12:25,697 DEBUG [RS:2;1a7c5470b472:44711 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, d701f2f82212e8f3b780ec9dae20e678 2024-11-23T17:12:25,697 DEBUG [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T17:12:25,697 INFO [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T17:12:25,697 DEBUG [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T17:12:25,697 DEBUG [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T17:12:25,697 DEBUG [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T17:12:25,698 INFO [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=10.46 KB heapSize=17.44 KB 2024-11-23T17:12:25,701 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportExpiredSnapshot/5c659a536d4a93accf1b401f39f6f967/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=8 2024-11-23T17:12:25,701 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/default/testExportExpiredSnapshot/d701f2f82212e8f3b780ec9dae20e678/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-23T17:12:25,702 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:12:25,702 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967. 
2024-11-23T17:12:25,702 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:12:25,702 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 5c659a536d4a93accf1b401f39f6f967: Waiting for close lock at 1732381945696Running coprocessor pre-close hooks at 1732381945696Disabling compacts and flushes for region at 1732381945696Disabling writes for close at 1732381945696Writing region close event to WAL at 1732381945697 (+1 ms)Running coprocessor post-close hooks at 1732381945702 (+5 ms)Closed at 1732381945702 2024-11-23T17:12:25,702 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1732381801999.d701f2f82212e8f3b780ec9dae20e678. 2024-11-23T17:12:25,702 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for d701f2f82212e8f3b780ec9dae20e678: Waiting for close lock at 1732381945696Running coprocessor pre-close hooks at 1732381945696Disabling compacts and flushes for region at 1732381945696Disabling writes for close at 1732381945696Writing region close event to WAL at 1732381945698 (+2 ms)Running coprocessor post-close hooks at 1732381945702 (+4 ms)Closed at 1732381945702 2024-11-23T17:12:25,702 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967. 2024-11-23T17:12:25,702 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1732381801999.d701f2f82212e8f3b780ec9dae20e678. 
2024-11-23T17:12:25,703 INFO [regionserver/1a7c5470b472:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T17:12:25,705 INFO [regionserver/1a7c5470b472:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-23T17:12:25,705 INFO [regionserver/1a7c5470b472:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-23T17:12:25,714 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/acl/64dcd7fd9fc755d9ffeb5ce90b3151ce/.tmp/l/c3749f05d29e47cb82f8942afdb35a52 is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1732381800051/DeleteFamily/seqid=0 2024-11-23T17:12:25,714 DEBUG [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/.tmp/info/e5780d2dd59d47488c1bc573b267733c is 173, key is testExportExpiredSnapshot,1,1732381801999.5c659a536d4a93accf1b401f39f6f967./info:regioninfo/1732381839285/Put/seqid=0 2024-11-23T17:12:25,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742474_1650 (size=7869) 2024-11-23T17:12:25,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742474_1650 (size=7869) 2024-11-23T17:12:25,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742474_1650 (size=7869) 2024-11-23T17:12:25,720 INFO [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.62 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/.tmp/info/e5780d2dd59d47488c1bc573b267733c 2024-11-23T17:12:25,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742473_1649 (size=5860) 2024-11-23T17:12:25,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742473_1649 (size=5860) 2024-11-23T17:12:25,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742473_1649 (size=5860) 2024-11-23T17:12:25,722 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=31 (bloomFilter=false), to=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/acl/64dcd7fd9fc755d9ffeb5ce90b3151ce/.tmp/l/c3749f05d29e47cb82f8942afdb35a52 2024-11-23T17:12:25,725 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c3749f05d29e47cb82f8942afdb35a52 2024-11-23T17:12:25,726 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/acl/64dcd7fd9fc755d9ffeb5ce90b3151ce/.tmp/l/c3749f05d29e47cb82f8942afdb35a52 as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/acl/64dcd7fd9fc755d9ffeb5ce90b3151ce/l/c3749f05d29e47cb82f8942afdb35a52 2024-11-23T17:12:25,732 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c3749f05d29e47cb82f8942afdb35a52 2024-11-23T17:12:25,732 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/acl/64dcd7fd9fc755d9ffeb5ce90b3151ce/l/c3749f05d29e47cb82f8942afdb35a52, entries=14, sequenceid=31, filesize=5.7 K 2024-11-23T17:12:25,733 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1694, heapSize ~3.88 KB/3976, currentSize=0 B/0 for 64dcd7fd9fc755d9ffeb5ce90b3151ce in 37ms, sequenceid=31, compaction requested=false 2024-11-23T17:12:25,736 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/acl/64dcd7fd9fc755d9ffeb5ce90b3151ce/recovered.edits/34.seqid, newMaxSeqId=34, maxSeqId=1 2024-11-23T17:12:25,737 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:12:25,737 INFO [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce. 2024-11-23T17:12:25,737 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 64dcd7fd9fc755d9ffeb5ce90b3151ce: Waiting for close lock at 1732381945696Running coprocessor pre-close hooks at 1732381945696Disabling compacts and flushes for region at 1732381945696Disabling writes for close at 1732381945696Obtaining lock to block concurrent updates at 1732381945696Preparing flush snapshotting stores in 64dcd7fd9fc755d9ffeb5ce90b3151ce at 1732381945696Finished memstore snapshotting hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce., syncing WAL and waiting on mvcc, flushsize=dataSize=1694, getHeapSize=3976, getOffHeapSize=0, getCellsCount=27 at 1732381945697 (+1 ms)Flushing stores of hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce. 
at 1732381945697Flushing 64dcd7fd9fc755d9ffeb5ce90b3151ce/l: creating writer at 1732381945697Flushing 64dcd7fd9fc755d9ffeb5ce90b3151ce/l: appending metadata at 1732381945714 (+17 ms)Flushing 64dcd7fd9fc755d9ffeb5ce90b3151ce/l: closing flushed file at 1732381945714Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@36723029: reopening flushed file at 1732381945725 (+11 ms)Finished flush of dataSize ~1.65 KB/1694, heapSize ~3.88 KB/3976, currentSize=0 B/0 for 64dcd7fd9fc755d9ffeb5ce90b3151ce in 37ms, sequenceid=31, compaction requested=false at 1732381945733 (+8 ms)Writing region close event to WAL at 1732381945734 (+1 ms)Running coprocessor post-close hooks at 1732381945737 (+3 ms)Closed at 1732381945737 2024-11-23T17:12:25,737 DEBUG [RS_CLOSE_REGION-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1732381537701.64dcd7fd9fc755d9ffeb5ce90b3151ce. 2024-11-23T17:12:25,744 DEBUG [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/.tmp/ns/de3114dba4a04f3cafac8e32fe0715dd is 119, key is testtb-testExportFileSystemStateWithSkipTmp,1,1732381891870.11652adc4e2c2498a5b29c501cdb4e49./ns:/1732381911508/DeleteFamily/seqid=0 2024-11-23T17:12:25,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742475_1651 (size=5927) 2024-11-23T17:12:25,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742475_1651 (size=5927) 2024-11-23T17:12:25,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742475_1651 (size=5927) 2024-11-23T17:12:25,749 INFO [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=430 B at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/.tmp/ns/de3114dba4a04f3cafac8e32fe0715dd 2024-11-23T17:12:25,768 DEBUG [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/.tmp/rep_barrier/55e5544165ca4f8f9b90e844eb900107 is 128, key is testtb-testExportFileSystemStateWithSkipTmp,1,1732381891870.11652adc4e2c2498a5b29c501cdb4e49./rep_barrier:/1732381911508/DeleteFamily/seqid=0 2024-11-23T17:12:25,771 INFO [regionserver/1a7c5470b472:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T17:12:25,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742476_1652 (size=5990) 2024-11-23T17:12:25,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742476_1652 (size=5990) 2024-11-23T17:12:25,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742476_1652 (size=5990) 2024-11-23T17:12:25,772 INFO [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=466 B at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/.tmp/rep_barrier/55e5544165ca4f8f9b90e844eb900107 2024-11-23T17:12:25,789 INFO [regionserver/1a7c5470b472:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T17:12:25,790 DEBUG [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/.tmp/table/98a1dc2e8da14f269128715f7944f7ed is 122, key is testtb-testExportFileSystemStateWithSkipTmp,1,1732381891870.11652adc4e2c2498a5b29c501cdb4e49./table:/1732381911508/DeleteFamily/seqid=0 2024-11-23T17:12:25,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742477_1653 (size=6012) 2024-11-23T17:12:25,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742477_1653 (size=6012) 2024-11-23T17:12:25,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742477_1653 (size=6012) 2024-11-23T17:12:25,795 INFO [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=996 B at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/.tmp/table/98a1dc2e8da14f269128715f7944f7ed 2024-11-23T17:12:25,799 DEBUG [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/.tmp/info/e5780d2dd59d47488c1bc573b267733c as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/info/e5780d2dd59d47488c1bc573b267733c 2024-11-23T17:12:25,803 INFO [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/info/e5780d2dd59d47488c1bc573b267733c, entries=19, sequenceid=246, filesize=7.7 K 2024-11-23T17:12:25,803 DEBUG [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/.tmp/ns/de3114dba4a04f3cafac8e32fe0715dd as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/ns/de3114dba4a04f3cafac8e32fe0715dd 2024-11-23T17:12:25,806 INFO [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/ns/de3114dba4a04f3cafac8e32fe0715dd, entries=4, sequenceid=246, filesize=5.8 K 2024-11-23T17:12:25,807 DEBUG [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/.tmp/rep_barrier/55e5544165ca4f8f9b90e844eb900107 as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/rep_barrier/55e5544165ca4f8f9b90e844eb900107 2024-11-23T17:12:25,810 INFO [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/rep_barrier/55e5544165ca4f8f9b90e844eb900107, entries=4, sequenceid=246, filesize=5.8 K 2024-11-23T17:12:25,811 DEBUG [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/.tmp/table/98a1dc2e8da14f269128715f7944f7ed as hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/table/98a1dc2e8da14f269128715f7944f7ed 2024-11-23T17:12:25,814 INFO [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/table/98a1dc2e8da14f269128715f7944f7ed, entries=6, sequenceid=246, filesize=5.9 K 2024-11-23T17:12:25,815 INFO [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~10.46 KB/10716, heapSize ~17.38 KB/17792, currentSize=0 B/0 for 1588230740 in 118ms, sequenceid=246, compaction requested=false 2024-11-23T17:12:25,818 DEBUG [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/data/hbase/meta/1588230740/recovered.edits/249.seqid, newMaxSeqId=249, maxSeqId=214 2024-11-23T17:12:25,819 DEBUG [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:12:25,819 DEBUG [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T17:12:25,819 INFO [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T17:12:25,819 DEBUG [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732381945697Running coprocessor pre-close hooks at 1732381945697Disabling compacts and flushes for region at 1732381945697Disabling writes for close at 1732381945697Obtaining lock to block concurrent updates at 1732381945698 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1732381945698Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=10716, getHeapSize=17792, getOffHeapSize=0, getCellsCount=81 at 1732381945698Flushing stores of hbase:meta,,1.1588230740 at 1732381945698Flushing 1588230740/info: creating writer at 1732381945698Flushing 1588230740/info: appending metadata at 1732381945714 (+16 ms)Flushing 
1588230740/info: closing flushed file at 1732381945714Flushing 1588230740/ns: creating writer at 1732381945725 (+11 ms)Flushing 1588230740/ns: appending metadata at 1732381945744 (+19 ms)Flushing 1588230740/ns: closing flushed file at 1732381945744Flushing 1588230740/rep_barrier: creating writer at 1732381945753 (+9 ms)Flushing 1588230740/rep_barrier: appending metadata at 1732381945767 (+14 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1732381945767Flushing 1588230740/table: creating writer at 1732381945776 (+9 ms)Flushing 1588230740/table: appending metadata at 1732381945789 (+13 ms)Flushing 1588230740/table: closing flushed file at 1732381945790 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6eec26e3: reopening flushed file at 1732381945799 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d45795: reopening flushed file at 1732381945803 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@525e9320: reopening flushed file at 1732381945807 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a8c4316: reopening flushed file at 1732381945810 (+3 ms)Finished flush of dataSize ~10.46 KB/10716, heapSize ~17.38 KB/17792, currentSize=0 B/0 for 1588230740 in 118ms, sequenceid=246, compaction requested=false at 1732381945815 (+5 ms)Writing region close event to WAL at 1732381945816 (+1 ms)Running coprocessor post-close hooks at 1732381945819 (+3 ms)Closed at 1732381945819 2024-11-23T17:12:25,819 DEBUG [RS_CLOSE_META-regionserver/1a7c5470b472:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-23T17:12:25,897 INFO [RS:0;1a7c5470b472:37875 {}] regionserver.HRegionServer(976): stopping server 1a7c5470b472,37875,1732381534675; all regions closed. 2024-11-23T17:12:25,897 INFO [RS:1;1a7c5470b472:43453 {}] regionserver.HRegionServer(976): stopping server 1a7c5470b472,43453,1732381534825; all regions closed. 2024-11-23T17:12:25,897 INFO [RS:2;1a7c5470b472:44711 {}] regionserver.HRegionServer(976): stopping server 1a7c5470b472,44711,1732381534872; all regions closed. 
2024-11-23T17:12:25,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742373_1549 (size=13357) 2024-11-23T17:12:25,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741834_1010 (size=14377) 2024-11-23T17:12:25,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742373_1549 (size=13357) 2024-11-23T17:12:25,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741834_1010 (size=14377) 2024-11-23T17:12:25,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741834_1010 (size=14377) 2024-11-23T17:12:25,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741836_1012 (size=89810) 2024-11-23T17:12:25,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741836_1012 (size=89810) 2024-11-23T17:12:25,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741836_1012 (size=89810) 2024-11-23T17:12:25,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742373_1549 (size=13357) 2024-11-23T17:12:25,905 DEBUG [RS:2;1a7c5470b472:44711 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/oldWALs 2024-11-23T17:12:25,905 DEBUG [RS:1;1a7c5470b472:43453 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/oldWALs 2024-11-23T17:12:25,905 DEBUG [RS:0;1a7c5470b472:37875 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/oldWALs 2024-11-23T17:12:25,905 INFO [RS:2;1a7c5470b472:44711 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 1a7c5470b472%2C44711%2C1732381534872.meta:.meta(num 1732381838453) 2024-11-23T17:12:25,905 INFO [RS:1;1a7c5470b472:43453 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 1a7c5470b472%2C43453%2C1732381534825:(num 1732381536812) 2024-11-23T17:12:25,905 DEBUG [RS:1;1a7c5470b472:43453 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:12:25,905 INFO [RS:0;1a7c5470b472:37875 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 1a7c5470b472%2C37875%2C1732381534675.meta:.meta(num 1732381537230) 2024-11-23T17:12:25,905 INFO [RS:1;1a7c5470b472:43453 {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T17:12:25,905 INFO [RS:1;1a7c5470b472:43453 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T17:12:25,905 INFO [RS:1;1a7c5470b472:43453 {}] hbase.ChoreService(370): Chore service for: regionserver/1a7c5470b472:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-23T17:12:25,906 INFO [RS:1;1a7c5470b472:43453 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-23T17:12:25,906 INFO [RS:1;1a7c5470b472:43453 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-23T17:12:25,906 INFO [RS:1;1a7c5470b472:43453 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-23T17:12:25,906 INFO [RS:1;1a7c5470b472:43453 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T17:12:25,906 INFO [regionserver/1a7c5470b472:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-23T17:12:25,906 INFO [RS:1;1a7c5470b472:43453 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43453 2024-11-23T17:12:25,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741833_1009 (size=10681) 2024-11-23T17:12:25,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741833_1009 (size=10681) 2024-11-23T17:12:25,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741833_1009 (size=10681) 2024-11-23T17:12:25,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073741835_1011 (size=18714) 2024-11-23T17:12:25,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741835_1011 (size=18714) 2024-11-23T17:12:25,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073741835_1011 (size=18714) 2024-11-23T17:12:25,910 DEBUG [RS:2;1a7c5470b472:44711 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/oldWALs 2024-11-23T17:12:25,910 INFO [RS:2;1a7c5470b472:44711 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 1a7c5470b472%2C44711%2C1732381534872:(num 1732381536812) 2024-11-23T17:12:25,910 DEBUG [RS:2;1a7c5470b472:44711 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:12:25,911 INFO [RS:2;1a7c5470b472:44711 {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T17:12:25,911 INFO [RS:2;1a7c5470b472:44711 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T17:12:25,911 INFO [RS:2;1a7c5470b472:44711 {}] hbase.ChoreService(370): Chore service for: regionserver/1a7c5470b472:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-23T17:12:25,911 INFO [RS:2;1a7c5470b472:44711 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T17:12:25,911 INFO [regionserver/1a7c5470b472:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-23T17:12:25,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1a7c5470b472,43453,1732381534825 2024-11-23T17:12:25,911 INFO [RS:2;1a7c5470b472:44711 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44711 2024-11-23T17:12:25,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T17:12:25,912 INFO [RS:1;1a7c5470b472:43453 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T17:12:25,912 DEBUG [RS:0;1a7c5470b472:37875 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/oldWALs 2024-11-23T17:12:25,912 INFO [RS:0;1a7c5470b472:37875 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 1a7c5470b472%2C37875%2C1732381534675:(num 1732381536828) 2024-11-23T17:12:25,912 DEBUG [RS:0;1a7c5470b472:37875 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:12:25,912 INFO [RS:0;1a7c5470b472:37875 {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T17:12:25,912 INFO [RS:0;1a7c5470b472:37875 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T17:12:25,912 INFO [RS:0;1a7c5470b472:37875 {}] hbase.ChoreService(370): Chore service for: regionserver/1a7c5470b472:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-23T17:12:25,913 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1a7c5470b472,43453,1732381534825] 2024-11-23T17:12:25,913 INFO [RS:0;1a7c5470b472:37875 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-23T17:12:25,913 INFO [RS:0;1a7c5470b472:37875 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-23T17:12:25,913 INFO [RS:0;1a7c5470b472:37875 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-23T17:12:25,913 INFO [RS:0;1a7c5470b472:37875 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T17:12:25,913 INFO [regionserver/1a7c5470b472:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-23T17:12:25,913 INFO [RS:0;1a7c5470b472:37875 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37875 2024-11-23T17:12:25,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1a7c5470b472,44711,1732381534872 2024-11-23T17:12:25,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T17:12:25,914 INFO [RS:2;1a7c5470b472:44711 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T17:12:25,922 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1a7c5470b472,43453,1732381534825 already deleted, retry=false 2024-11-23T17:12:25,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1a7c5470b472,37875,1732381534675 2024-11-23T17:12:25,922 INFO [RS:0;1a7c5470b472:37875 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T17:12:25,922 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1a7c5470b472,43453,1732381534825 expired; onlineServers=2 2024-11-23T17:12:25,924 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1a7c5470b472,44711,1732381534872] 2024-11-23T17:12:25,925 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1a7c5470b472,44711,1732381534872 already deleted, retry=false 2024-11-23T17:12:25,925 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1a7c5470b472,44711,1732381534872 expired; onlineServers=1 2024-11-23T17:12:25,925 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1a7c5470b472,37875,1732381534675] 2024-11-23T17:12:25,928 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/1a7c5470b472,37875,1732381534675 already deleted, retry=false 2024-11-23T17:12:25,928 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 1a7c5470b472,37875,1732381534675 expired; onlineServers=0 2024-11-23T17:12:25,928 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '1a7c5470b472,46159,1732381533870' ***** 2024-11-23T17:12:25,928 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-23T17:12:25,929 INFO [M:0;1a7c5470b472:46159 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T17:12:25,929 INFO [M:0;1a7c5470b472:46159 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T17:12:25,929 DEBUG [M:0;1a7c5470b472:46159 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-23T17:12:25,929 DEBUG [M:0;1a7c5470b472:46159 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-23T17:12:25,929 DEBUG [master/1a7c5470b472:0:becomeActiveMaster-HFileCleaner.small.0-1732381536408 {}] cleaner.HFileCleaner(306): Exit Thread[master/1a7c5470b472:0:becomeActiveMaster-HFileCleaner.small.0-1732381536408,5,FailOnTimeoutGroup] 2024-11-23T17:12:25,929 WARN [OldWALsCleaner-0 {}] 
cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-23T17:12:25,929 DEBUG [master/1a7c5470b472:0:becomeActiveMaster-HFileCleaner.large.0-1732381536404 {}] cleaner.HFileCleaner(306): Exit Thread[master/1a7c5470b472:0:becomeActiveMaster-HFileCleaner.large.0-1732381536404,5,FailOnTimeoutGroup] 2024-11-23T17:12:25,929 INFO [M:0;1a7c5470b472:46159 {}] hbase.ChoreService(370): Chore service for: master/1a7c5470b472:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-23T17:12:25,929 INFO [M:0;1a7c5470b472:46159 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T17:12:25,929 DEBUG [M:0;1a7c5470b472:46159 {}] master.HMaster(1795): Stopping service threads 2024-11-23T17:12:25,929 INFO [M:0;1a7c5470b472:46159 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-23T17:12:25,930 INFO [M:0;1a7c5470b472:46159 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T17:12:25,930 INFO [M:0;1a7c5470b472:46159 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-23T17:12:25,930 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-23T17:12:25,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-23T17:12:25,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:12:25,931 DEBUG [M:0;1a7c5470b472:46159 {}] zookeeper.ZKUtil(347): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-23T17:12:25,931 WARN [M:0;1a7c5470b472:46159 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-23T17:12:25,932 INFO [M:0;1a7c5470b472:46159 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/.lastflushedseqids 2024-11-23T17:12:25,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32895 is added to blk_1073742478_1654 (size=333) 2024-11-23T17:12:25,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073742478_1654 (size=333) 2024-11-23T17:12:25,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38951 is added to blk_1073742478_1654 (size=333) 2024-11-23T17:12:25,944 INFO [M:0;1a7c5470b472:46159 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-23T17:12:25,944 INFO [M:0;1a7c5470b472:46159 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-23T17:12:25,944 DEBUG [M:0;1a7c5470b472:46159 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T17:12:25,958 INFO 
[M:0;1a7c5470b472:46159 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-23T17:12:25,958 DEBUG [M:0;1a7c5470b472:46159 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-23T17:12:25,958 DEBUG [M:0;1a7c5470b472:46159 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-23T17:12:25,958 DEBUG [M:0;1a7c5470b472:46159 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-23T17:12:25,958 INFO [M:0;1a7c5470b472:46159 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=1007.28 KB heapSize=1.18 MB
2024-11-23T17:12:25,959 ERROR [AsyncFSWAL-0-hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/MasterData-prefix:1a7c5470b472,46159,1732381533870 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/MasterData-prefix:1a7c5470b472,46159,1732381533870,5,FailOnTimeoutGroup] died
java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:414) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:134) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:181) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:100) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1333) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendAndSync(AbstractFSWAL.java:1724) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.consume(AbstractFSWAL.java:1832) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T17:12:26,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-23T17:12:26,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43453-0x10145ca4ecf0002, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-23T17:12:26,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-23T17:12:26,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44711-0x10145ca4ecf0003, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-23T17:12:26,023 INFO [RS:2;1a7c5470b472:44711 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-23T17:12:26,023 INFO [RS:1;1a7c5470b472:43453 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-23T17:12:26,023 INFO [RS:1;1a7c5470b472:43453 {}] regionserver.HRegionServer(1031): Exiting; stopping=1a7c5470b472,43453,1732381534825; zookeeper connection closed.
2024-11-23T17:12:26,023 INFO [RS:2;1a7c5470b472:44711 {}] regionserver.HRegionServer(1031): Exiting; stopping=1a7c5470b472,44711,1732381534872; zookeeper connection closed.
2024-11-23T17:12:26,024 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@11c575ca {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@11c575ca
2024-11-23T17:12:26,024 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2046b118 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2046b118
2024-11-23T17:12:26,024 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-23T17:12:26,024 INFO [RS:0;1a7c5470b472:37875 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-23T17:12:26,024 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37875-0x10145ca4ecf0001, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-23T17:12:26,024 INFO [RS:0;1a7c5470b472:37875 {}] regionserver.HRegionServer(1031): Exiting; stopping=1a7c5470b472,37875,1732381534675; zookeeper connection closed.
2024-11-23T17:12:26,024 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6973f7d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6973f7d
2024-11-23T17:12:26,025 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete
2024-11-23T17:12:31,216 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
2024-11-23T17:12:33,180 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-23T17:12:34,340 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController
2024-11-23T17:12:34,340 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-23T17:12:34,340 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-23T17:12:34,340 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot
2024-11-23T17:12:34,340 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl
2024-11-23T17:12:34,340 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController
2024-11-23T17:12:34,340 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver
2024-11-23T17:12:34,340 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController
2024-11-23T17:12:39,842 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
2024-11-23T17:13:03,180 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;1a7c5470b472:46159 237 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 52 Waited count: 21 Waiting on java.lang.ref.ReferenceQueue$Lock@f99d861 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 22 Waited count: 25 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 24 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 2 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1871cd9b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 4779 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 48 Waiting on java.util.concurrent.CountDownLatch$Sync@78baac59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12453 Waited count: 13190 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 15 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@22f5908a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@34fe8c5a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@58cc40a9): State: TIMED_WAITING Blocked count: 0 Waited count: 951 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 96 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1861387285-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1861387285-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1861387285-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1861387285-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1861387285-41-acceptor-0@74a146ab-ServerConnector@649f03da{HTTP/1.1, (http/1.1)}{localhost:39301}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1861387285-42): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1861387285-43): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1861387285-44): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-61abf97f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 45 Waited count: 3669 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@687fc7f0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 35431): State: TIMED_WAITING Blocked count: 1 Waited 
count: 49 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 95 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@647b647f): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 159 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@86c698e): State: TIMED_WAITING Blocked count: 0 Waited count: 95 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 164 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 46874 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1838 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@572c6a67 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 35431): State: TIMED_WAITING Blocked count: 87 Waited count: 2719 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 35431): State: TIMED_WAITING Blocked count: 77 Waited count: 2731 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 35431): State: TIMED_WAITING Blocked count: 70 Waited count: 2704 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 35431): State: TIMED_WAITING Blocked count: 68 Waited count: 2723 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 35431): State: TIMED_WAITING Blocked count: 97 Waited count: 2711 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@6da51f12): State: TIMED_WAITING Blocked count: 0 Waited count: 238 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@5e705095): State: TIMED_WAITING Blocked count: 0 Waited count: 95 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@26af3662): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@43d016d): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(788830510)): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp795976251-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp795976251-87-acceptor-0@137ae5e2-ServerConnector@7147894f{HTTP/1.1, (http/1.1)}{localhost:44965}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp795976251-88): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp795976251-89): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-2ee2162-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4e5a1cf1): State: TIMED_WAITING Blocked count: 0 Waited count: 948 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 39753): State: TIMED_WAITING Blocked count: 1 Waited count: 49 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 95 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 344 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@305ba803 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1942416587-172.17.0.2-1732381530223 heartbeating to localhost/127.0.0.1:35431): State: TIMED_WAITING Blocked count: 1516 Waited count: 1656 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ae688ac): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 39753): State: TIMED_WAITING Blocked count: 0 Waited count: 480 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 39753): State: TIMED_WAITING Blocked count: 0 Waited count: 484 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 39753): State: TIMED_WAITING Blocked count: 0 Waited count: 474 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 39753): State: TIMED_WAITING Blocked count: 0 Waited count: 477 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 39753): State: TIMED_WAITING Blocked count: 0 Waited count: 474 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp1421273556-119): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1421273556-120-acceptor-0@77b6f05c-ServerConnector@56952abb{HTTP/1.1, (http/1.1)}{localhost:38317}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1421273556-121): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1421273556-124): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-13de1d7e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (IPC Client (1065665691) connection to localhost/127.0.0.1:35431 from jenkins): State: TIMED_WAITING Blocked count: 1578 Waited count: 1579 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (IPC Parameter Sending Thread for localhost/127.0.0.1:35431): State: TIMED_WAITING Blocked count: 0 Waited count: 2346 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@65d9109d): State: TIMED_WAITING Blocked count: 0 Waited count: 948 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 44465): State: TIMED_WAITING Blocked count: 1 Waited count: 49 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 95 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 1 Waited count: 331 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32d9e39e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1942416587-172.17.0.2-1732381530223 heartbeating to localhost/127.0.0.1:35431): State: TIMED_WAITING Blocked count: 1484 Waited count: 1642 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@660ae03b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 44465): State: TIMED_WAITING Blocked count: 0 Waited count: 474 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 44465): State: TIMED_WAITING Blocked count: 0 Waited count: 474 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 44465): State: TIMED_WAITING Blocked count: 0 Waited count: 479 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 44465): State: TIMED_WAITING Blocked count: 0 Waited count: 476 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 44465): State: TIMED_WAITING Blocked count: 0 Waited count: 479 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 155 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp334392358-156): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp334392358-157-acceptor-0@2053b94a-ServerConnector@1c0d7560{HTTP/1.1, (http/1.1)}{localhost:44953}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp334392358-158): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp334392358-159): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (Session-HouseKeeper-24bad932-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data3)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data4)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data1)): State: TIMED_WAITING Blocked count: 11 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data2)): State: 
TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 174 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data2/current/BP-1942416587-172.17.0.2-1732381530223): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data3/current/BP-1942416587-172.17.0.2-1732381530223): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 178 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data1/current/BP-1942416587-172.17.0.2-1732381530223): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data4/current/BP-1942416587-172.17.0.2-1732381530223): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 179 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@37aed54c): State: TIMED_WAITING Blocked count: 0 Waited count: 947 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 181 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 182 (IPC Server idle connection scanner for port 45243): State: TIMED_WAITING Blocked count: 1 Waited count: 49 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 184 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 95 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (Command processor): State: WAITING Blocked count: 1 Waited count: 302 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3355b498 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 201 (BP-1942416587-172.17.0.2-1732381530223 heartbeating to localhost/127.0.0.1:35431): State: TIMED_WAITING Blocked count: 1471 Waited count: 1645 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (java.util.concurrent.ThreadPoolExecutor$Worker@6a6171c7[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (pool-44-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ecc9509): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 183 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 180 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 208 (java.util.concurrent.ThreadPoolExecutor$Worker@570398a0[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (IPC Server handler 0 on default port 45243): State: TIMED_WAITING Blocked count: 0 Waited count: 487 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 210 (IPC Server handler 1 on default port 45243): State: TIMED_WAITING Blocked count: 0 Waited count: 479 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 2 on default port 45243): State: TIMED_WAITING Blocked count: 0 Waited count: 474 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 3 on default port 45243): State: TIMED_WAITING Blocked count: 0 Waited count: 500 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 4 on default port 45243): State: TIMED_WAITING Blocked count: 0 Waited count: 488 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 216 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 217 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data5/current/BP-1942416587-172.17.0.2-1732381530223): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data6/current/BP-1942416587-172.17.0.2-1732381530223): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (java.util.concurrent.ThreadPoolExecutor$Worker@586e080b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): 
State: TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:49631): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 48 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 238 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 12 Waited count: 387 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@63a2f446 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 
cport:49631):): State: WAITING Blocked count: 1 Waited count: 489 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3adfa618 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 536 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@30ee9708 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 246 (LeaseRenewer:jenkins@localhost:35431): State: TIMED_WAITING Blocked count: 13 Waited count: 493 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@5d93b81b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 1 Waited count: 372 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 42 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:49631)): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 7 Waited count: 60 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e29a85f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 5 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 22 Waited count: 78 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4c11587e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 5 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 5 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 0 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 1 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 0 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 1 Waited count: 98 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 
(NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46159): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@c026287 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159): State: WAITING Blocked count: 105 Waited count: 470 Waiting on java.util.concurrent.Semaphore$NonfairSync@4f2c1c42 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159): State: WAITING Blocked count: 141 Waited count: 568 Waiting on java.util.concurrent.Semaphore$NonfairSync@5cd43ea8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46159): State: WAITING Blocked count: 103 Waited count: 8087 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1ac04fe4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46159): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5c91061 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 
(RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46159): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5c91061 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=46159): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2aa8c296 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=46159): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@57749815 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=46159): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4f535270 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 
(RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=46159): State: WAITING Blocked count: 0 Waited count: 7 Waiting on java.util.concurrent.Semaphore$NonfairSync@2d409e3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6e04d75d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 310 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 330 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 105 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;1a7c5470b472:46159): State: TIMED_WAITING Blocked count: 12 Waited count: 4116 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1098/0x00007f167cf8e6f0.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 351 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 48 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 353 (master/1a7c5470b472:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/1a7c5470b472:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (org.apache.hadoop.hdfs.PeerCache@510f36f4): State: TIMED_WAITING Blocked count: 0 Waited count: 157 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 376 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 1 Waited count: 4700 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 393 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 80 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 394 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 109 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 151 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 419 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 47 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 46933 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 428 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 28 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 41 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 454 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5f799c48 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 476 
(regionserver/1a7c5470b472:0.procedureResultReporter): State: WAITING Blocked count: 21 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b601671 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 478 (regionserver/1a7c5470b472:0.procedureResultReporter): State: WAITING Blocked count: 12 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23127d10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 475 (regionserver/1a7c5470b472:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@453ccd8e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 500 (LeaseRenewer:jenkins.hfs.1@localhost:35431): State: TIMED_WAITING Blocked count: 12 Waited count: 490 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 501 
(LeaseRenewer:jenkins.hfs.2@localhost:35431): State: TIMED_WAITING Blocked count: 13 Waited count: 492 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 510 (LeaseRenewer:jenkins.hfs.0@localhost:35431): State: TIMED_WAITING Blocked count: 13 Waited count: 492 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 513 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 523 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 524 (region-location-0): State: WAITING Blocked count: 11 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b492bd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 529 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 406 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 46793 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 531 
(RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 551 (ForkJoinPool.commonPool-worker-1): State: WAITING Blocked count: 0 Waited count: 708 Waiting on java.util.concurrent.ForkJoinPool@1dacc584 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 566 (ForkJoinPool.commonPool-worker-3): State: WAITING Blocked count: 0 Waited count: 660 Waiting on java.util.concurrent.ForkJoinPool@1dacc584 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 586 (region-location-1): State: WAITING Blocked count: 4 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b492bd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 587 (region-location-2): State: WAITING Blocked count: 7 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b492bd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 985 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1045 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1048 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1078 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 68 Waited count: 112 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@e152943 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1093 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1094 
(MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1238 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1239 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1240 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1288 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1289 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1290 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1292 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1648 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 69 Waiting on java.util.TaskQueue@7c467346 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2011 (region-location-3): State: WAITING Blocked count: 3 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b492bd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2012 (region-location-4): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b492bd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2338 (ForkJoinPool.commonPool-worker-4): State: WAITING Blocked count: 0 Waited count: 584 Waiting on java.util.concurrent.ForkJoinPool@1dacc584 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 3081 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6704 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6705 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6706 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9170 (ForkJoinPool.commonPool-worker-6): State: TIMED_WAITING Blocked count: 0 Waited count: 177 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 9171 (ForkJoinPool.commonPool-worker-7): State: WAITING Blocked count: 0 Waited count: 233 Waiting on java.util.concurrent.ForkJoinPool@1dacc584 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 11506 (AsyncFSWAL-1-hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/MasterData-prefix:1a7c5470b472,46159,1732381533870): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7bd19af Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11510 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-11-23T17:13:33,180 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T17:14:03,180 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;1a7c5470b472:46159 232 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 52 Waited count: 21 Waiting on java.lang.ref.ReferenceQueue$Lock@f99d861 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 22 Waited count: 26 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 27 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 2 Waited count: 28 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1871cd9b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5379 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 54 Waiting on java.util.concurrent.CountDownLatch$Sync@3b363d4e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12453 Waited count: 13191 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 15 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@22f5908a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@34fe8c5a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@58cc40a9): State: TIMED_WAITING Blocked count: 0 Waited count: 1071 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 108 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1861387285-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1861387285-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1861387285-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1861387285-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1861387285-41-acceptor-0@74a146ab-ServerConnector@649f03da{HTTP/1.1, (http/1.1)}{localhost:39301}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1861387285-42): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1861387285-43): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1861387285-44): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-61abf97f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 45 Waited count: 3669 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@687fc7f0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 35431): State: TIMED_WAITING Blocked count: 1 Waited 
count: 55 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 107 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@647b647f): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 179 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@86c698e): State: TIMED_WAITING Blocked count: 0 Waited count: 107 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 184 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 52839 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1838 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@572c6a67 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 35431): State: TIMED_WAITING Blocked count: 87 Waited count: 2780 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 35431): State: TIMED_WAITING Blocked count: 77 Waited count: 2792 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 35431): State: TIMED_WAITING Blocked count: 70 Waited count: 2765 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 35431): State: TIMED_WAITING Blocked count: 68 Waited count: 2784 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 35431): State: TIMED_WAITING Blocked count: 97 Waited count: 2773 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@6da51f12): State: TIMED_WAITING Blocked count: 0 Waited count: 268 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@5e705095): State: TIMED_WAITING Blocked count: 0 Waited count: 107 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@26af3662): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@43d016d): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(788830510)): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp795976251-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp795976251-87-acceptor-0@137ae5e2-ServerConnector@7147894f{HTTP/1.1, (http/1.1)}{localhost:44965}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp795976251-88): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp795976251-89): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-2ee2162-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4e5a1cf1): State: TIMED_WAITING Blocked count: 0 Waited count: 1068 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 39753): State: TIMED_WAITING Blocked count: 1 Waited count: 55 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 107 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 364 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@305ba803 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1942416587-172.17.0.2-1732381530223 heartbeating to localhost/127.0.0.1:35431): State: TIMED_WAITING Blocked count: 1536 Waited count: 1696 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ae688ac): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 39753): State: TIMED_WAITING Blocked count: 0 Waited count: 540 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 39753): State: TIMED_WAITING Blocked count: 0 Waited count: 544 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 39753): State: TIMED_WAITING Blocked count: 0 Waited count: 534 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 39753): State: TIMED_WAITING Blocked count: 0 Waited count: 537 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 39753): State: TIMED_WAITING Blocked count: 0 Waited count: 534 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp1421273556-119): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1421273556-120-acceptor-0@77b6f05c-ServerConnector@56952abb{HTTP/1.1, (http/1.1)}{localhost:38317}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1421273556-121): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1421273556-124): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-13de1d7e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (IPC Client (1065665691) connection to localhost/127.0.0.1:35431 from jenkins): State: TIMED_WAITING Blocked count: 1638 Waited count: 1639 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (IPC Parameter Sending Thread for localhost/127.0.0.1:35431): State: TIMED_WAITING Blocked count: 0 Waited count: 2406 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@65d9109d): State: TIMED_WAITING Blocked count: 0 Waited count: 1068 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 44465): State: TIMED_WAITING Blocked count: 1 Waited count: 55 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 107 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 1 Waited count: 351 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32d9e39e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1942416587-172.17.0.2-1732381530223 heartbeating to localhost/127.0.0.1:35431): State: TIMED_WAITING Blocked count: 1504 Waited count: 1682 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@660ae03b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 44465): State: TIMED_WAITING Blocked count: 0 Waited count: 534 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 44465): State: TIMED_WAITING Blocked count: 0 Waited count: 534 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 44465): State: TIMED_WAITING Blocked count: 0 Waited count: 539 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 44465): State: TIMED_WAITING Blocked count: 0 Waited count: 536 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 44465): State: TIMED_WAITING Blocked count: 0 Waited count: 539 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 155 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp334392358-156): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp334392358-157-acceptor-0@2053b94a-ServerConnector@1c0d7560{HTTP/1.1, (http/1.1)}{localhost:44953}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp334392358-158): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp334392358-159): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (Session-HouseKeeper-24bad932-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data3)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data4)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data1)): State: TIMED_WAITING Blocked count: 11 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data2)): State: 
TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 174 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data2/current/BP-1942416587-172.17.0.2-1732381530223): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data3/current/BP-1942416587-172.17.0.2-1732381530223): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 178 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data1/current/BP-1942416587-172.17.0.2-1732381530223): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data4/current/BP-1942416587-172.17.0.2-1732381530223): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 179 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@37aed54c): State: TIMED_WAITING Blocked count: 0 Waited count: 1067 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 181 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 182 (IPC Server idle connection scanner for port 45243): State: TIMED_WAITING Blocked count: 1 Waited count: 55 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 184 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 107 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (Command processor): State: WAITING Blocked count: 1 Waited count: 322 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3355b498 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 201 (BP-1942416587-172.17.0.2-1732381530223 heartbeating to localhost/127.0.0.1:35431): State: TIMED_WAITING Blocked count: 1491 Waited count: 1685 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (java.util.concurrent.ThreadPoolExecutor$Worker@6a6171c7[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (pool-44-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ecc9509): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 183 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 180 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 208 (java.util.concurrent.ThreadPoolExecutor$Worker@570398a0[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (IPC Server handler 0 on default port 45243): State: TIMED_WAITING Blocked count: 0 Waited count: 547 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 210 (IPC Server handler 1 on default port 45243): State: TIMED_WAITING Blocked count: 0 Waited count: 539 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 2 on default port 45243): State: TIMED_WAITING Blocked count: 0 Waited count: 534 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 3 on default port 45243): State: TIMED_WAITING Blocked count: 0 Waited count: 560 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 4 on default port 45243): State: TIMED_WAITING Blocked count: 0 Waited count: 548 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 216 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 217 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data5/current/BP-1942416587-172.17.0.2-1732381530223): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data6/current/BP-1942416587-172.17.0.2-1732381530223): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (java.util.concurrent.ThreadPoolExecutor$Worker@586e080b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): 
State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:49631): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 54 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 268 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 12 Waited count: 392 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@63a2f446 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 
cport:49631):): State: WAITING Blocked count: 1 Waited count: 494 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3adfa618 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 541 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@30ee9708 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@5d93b81b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 1 Waited count: 406 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 42 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:49631)): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 7 Waited count: 60 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e29a85f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 5 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 22 Waited count: 78 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4c11587e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 5 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 5 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 0 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 1 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 0 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 1 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46159): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@c026287 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159): State: WAITING Blocked count: 105 Waited count: 470 Waiting on java.util.concurrent.Semaphore$NonfairSync@4f2c1c42 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 
(RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159): State: WAITING Blocked count: 141 Waited count: 568 Waiting on java.util.concurrent.Semaphore$NonfairSync@5cd43ea8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46159): State: WAITING Blocked count: 103 Waited count: 8087 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1ac04fe4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46159): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5c91061 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46159): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5c91061 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=46159): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2aa8c296 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=46159): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@57749815 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=46159): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4f535270 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=46159): State: WAITING Blocked count: 0 Waited count: 7 Waiting on java.util.concurrent.Semaphore$NonfairSync@2d409e3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6e04d75d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 310 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 330 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 105 
Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;1a7c5470b472:46159): State: TIMED_WAITING Blocked count: 12 Waited count: 4116 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1098/0x00007f167cf8e6f0.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 351 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 54 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 353 (master/1a7c5470b472:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/1a7c5470b472:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (org.apache.hadoop.hdfs.PeerCache@510f36f4): State: TIMED_WAITING Blocked count: 0 Waited count: 177 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 376 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 1 Waited count: 5300 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 393 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 80 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 394 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 109 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 168 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22194065 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 419 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 53 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 52935 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 428 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 28 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 41 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 454 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5f799c48 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 476 (regionserver/1a7c5470b472:0.procedureResultReporter): State: WAITING Blocked count: 21 Waited count: 37 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b601671 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 478 (regionserver/1a7c5470b472:0.procedureResultReporter): State: WAITING Blocked count: 12 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23127d10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 475 (regionserver/1a7c5470b472:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@453ccd8e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 513 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 523 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 524 (region-location-0): State: WAITING Blocked count: 11 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b492bd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 529 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 406 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 52795 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 531 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 551 (ForkJoinPool.commonPool-worker-1): State: TIMED_WAITING Blocked count: 0 Waited count: 709 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 566 (ForkJoinPool.commonPool-worker-3): State: WAITING Blocked count: 0 Waited count: 660 Waiting on java.util.concurrent.ForkJoinPool@1dacc584 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 586 (region-location-1): State: WAITING Blocked count: 4 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b492bd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 587 (region-location-2): State: WAITING Blocked count: 7 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b492bd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 985 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1051 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1048 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1078 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 68 Waited count: 112 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@e152943 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1093 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1094 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1238 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1239 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1240 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1288 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1289 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1290 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1292 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1648 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 69 Waiting on java.util.TaskQueue@7c467346 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2011 (region-location-3): State: WAITING Blocked count: 3 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b492bd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2012 (region-location-4): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b492bd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2338 (ForkJoinPool.commonPool-worker-4): State: WAITING Blocked count: 0 Waited count: 584 Waiting on java.util.concurrent.ForkJoinPool@1dacc584 
Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 3081 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6704 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6705 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6706 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9171 (ForkJoinPool.commonPool-worker-7): State: WAITING Blocked count: 0 Waited count: 233 Waiting on java.util.concurrent.ForkJoinPool@1dacc584 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 11506 (AsyncFSWAL-1-hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/MasterData-prefix:1a7c5470b472,46159,1732381533870): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7bd19af Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11510 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
2024-11-23T17:14:33,181 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might be because your Hadoop version is > 3.2.3 or 3.3.4; see HBASE-27595 for details.
2024-11-23T17:15:03,181 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might be because your Hadoop version is > 3.2.3 or 3.3.4; see HBASE-27595 for details.
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;1a7c5470b472:46159 232 active threads
Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495)
Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215)
Thread 3 (Finalizer): State: WAITING Blocked count: 52 Waited count: 21 Waiting on java.lang.ref.ReferenceQueue$Lock@f99d861 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172)
Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack:
Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 22 Waited count: 27 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13
(Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 30 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 2 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1871cd9b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5978 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 60 Waiting on java.util.concurrent.CountDownLatch$Sync@74a5b52d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12453 Waited count: 13192 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 15 Waited count: 16 Waiting on 
java.lang.ref.ReferenceQueue$Lock@22f5908a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@34fe8c5a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@58cc40a9): State: TIMED_WAITING Blocked count: 0 Waited count: 1191 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 120 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1861387285-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1861387285-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1861387285-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1861387285-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1861387285-41-acceptor-0@74a146ab-ServerConnector@649f03da{HTTP/1.1, (http/1.1)}{localhost:39301}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1861387285-42): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1861387285-43): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1861387285-44): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-61abf97f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 45 Waited count: 3669 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@687fc7f0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 
(Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 35431): State: TIMED_WAITING Blocked count: 1 Waited count: 61 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 119 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@647b647f): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 199 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@86c698e): State: TIMED_WAITING Blocked count: 0 Waited count: 119 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 204 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 58803 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1838 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@572c6a67 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 35431): State: TIMED_WAITING Blocked count: 87 Waited count: 2841 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 35431): State: TIMED_WAITING Blocked count: 77 Waited count: 2853 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 35431): State: TIMED_WAITING Blocked count: 70 Waited count: 2825 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 35431): State: TIMED_WAITING Blocked count: 68 Waited count: 2845 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 35431): State: TIMED_WAITING Blocked count: 97 Waited count: 2833 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@6da51f12): State: TIMED_WAITING Blocked count: 0 Waited count: 298 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@5e705095): State: TIMED_WAITING Blocked count: 0 Waited count: 119 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@26af3662): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@43d016d): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(788830510)): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp795976251-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp795976251-87-acceptor-0@137ae5e2-ServerConnector@7147894f{HTTP/1.1, (http/1.1)}{localhost:44965}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp795976251-88): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp795976251-89): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-2ee2162-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4e5a1cf1): State: TIMED_WAITING Blocked count: 0 Waited count: 1188 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 39753): State: TIMED_WAITING Blocked count: 1 Waited count: 61 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 119 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 384 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@305ba803 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1942416587-172.17.0.2-1732381530223 heartbeating to localhost/127.0.0.1:35431): State: TIMED_WAITING Blocked count: 1556 Waited count: 1736 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ae688ac): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) 
app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 39753): State: TIMED_WAITING Blocked count: 0 Waited count: 600 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 39753): State: TIMED_WAITING Blocked count: 0 Waited count: 604 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 39753): State: TIMED_WAITING Blocked count: 0 Waited count: 594 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 39753): State: TIMED_WAITING Blocked count: 0 Waited count: 597 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 39753): State: TIMED_WAITING Blocked count: 0 Waited count: 594 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp1421273556-119): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1421273556-120-acceptor-0@77b6f05c-ServerConnector@56952abb{HTTP/1.1, (http/1.1)}{localhost:38317}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1421273556-121): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1421273556-124): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-13de1d7e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (IPC Client (1065665691) connection to localhost/127.0.0.1:35431 from jenkins): State: TIMED_WAITING Blocked count: 1698 Waited count: 1699 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (IPC Parameter Sending Thread for localhost/127.0.0.1:35431): State: TIMED_WAITING Blocked count: 0 Waited count: 2466 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@65d9109d): State: TIMED_WAITING Blocked count: 0 Waited count: 1188 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 44465): State: TIMED_WAITING Blocked count: 1 Waited count: 61 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 119 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 1 Waited count: 371 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32d9e39e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1942416587-172.17.0.2-1732381530223 heartbeating to localhost/127.0.0.1:35431): State: TIMED_WAITING Blocked count: 1524 Waited count: 1722 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@660ae03b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 44465): State: TIMED_WAITING Blocked count: 0 Waited count: 594 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 44465): State: TIMED_WAITING Blocked count: 0 Waited count: 594 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 44465): State: TIMED_WAITING Blocked count: 0 Waited count: 599 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 44465): State: TIMED_WAITING Blocked count: 0 Waited count: 596 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 44465): State: TIMED_WAITING Blocked count: 0 
Waited count: 599 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 155 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp334392358-156): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp334392358-157-acceptor-0@2053b94a-ServerConnector@1c0d7560{HTTP/1.1, (http/1.1)}{localhost:44953}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp334392358-158): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp334392358-159): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (Session-HouseKeeper-24bad932-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data3)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data4)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data1)): State: TIMED_WAITING Blocked count: 11 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data2)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 174 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data2/current/BP-1942416587-172.17.0.2-1732381530223): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data3/current/BP-1942416587-172.17.0.2-1732381530223): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 178 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data1/current/BP-1942416587-172.17.0.2-1732381530223): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data4/current/BP-1942416587-172.17.0.2-1732381530223): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 179 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@37aed54c): State: TIMED_WAITING Blocked count: 0 Waited count: 1187 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 181 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 182 (IPC Server idle connection scanner for port 45243): State: TIMED_WAITING Blocked count: 1 Waited count: 61 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 184 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 119 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (Command processor): State: WAITING Blocked count: 1 Waited count: 342 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3355b498 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 201 (BP-1942416587-172.17.0.2-1732381530223 heartbeating to localhost/127.0.0.1:35431): State: TIMED_WAITING Blocked count: 1511 Waited count: 1725 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (java.util.concurrent.ThreadPoolExecutor$Worker@6a6171c7[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (pool-44-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ecc9509): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 183 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 180 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 208 (java.util.concurrent.ThreadPoolExecutor$Worker@570398a0[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (IPC Server handler 0 on default port 45243): State: TIMED_WAITING Blocked count: 0 Waited count: 610 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 210 (IPC Server handler 1 on default port 45243): State: TIMED_WAITING Blocked count: 0 Waited count: 599 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 2 on default port 45243): State: TIMED_WAITING Blocked count: 0 Waited count: 594 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 3 on default port 45243): State: TIMED_WAITING Blocked count: 0 Waited count: 625 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 4 on default port 45243): State: TIMED_WAITING Blocked count: 0 Waited count: 608 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 216 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 217 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data5/current/BP-1942416587-172.17.0.2-1732381530223): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data6/current/BP-1942416587-172.17.0.2-1732381530223): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (java.util.concurrent.ThreadPoolExecutor$Worker@586e080b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:49631): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 60 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 298 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 12 Waited count: 396 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@63a2f446 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:49631):): State: WAITING Blocked count: 1 Waited count: 498 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3adfa618 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 545 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@30ee9708 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@5d93b81b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 1 Waited count: 434 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 42 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:49631)): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited 
test-EventThread): State: WAITING Blocked count: 7 Waited count: 60 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e29a85f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 5 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 22 Waited count: 78 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4c11587e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 5 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 5 Waited count: 100 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 0 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 
(NIOWorkerThread-9): State: WAITING Blocked count: 1 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 0 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 1 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46159): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@c026287 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 
(RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159): State: WAITING Blocked count: 105 Waited count: 470 Waiting on java.util.concurrent.Semaphore$NonfairSync@4f2c1c42 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159): State: WAITING Blocked count: 141 Waited count: 568 Waiting on java.util.concurrent.Semaphore$NonfairSync@5cd43ea8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46159): State: WAITING Blocked count: 103 Waited count: 8087 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1ac04fe4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46159): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5c91061 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46159): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5c91061 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=46159): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2aa8c296 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=46159): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@57749815 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=46159): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4f535270 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=46159): State: WAITING Blocked count: 0 Waited count: 7 Waiting on java.util.concurrent.Semaphore$NonfairSync@2d409e3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6e04d75d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 310 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 330 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 105 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;1a7c5470b472:46159): State: TIMED_WAITING Blocked count: 12 Waited count: 4116 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1098/0x00007f167cf8e6f0.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) 
app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 351 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 60 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 353 (master/1a7c5470b472:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/1a7c5470b472:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (org.apache.hadoop.hdfs.PeerCache@510f36f4): State: TIMED_WAITING Blocked count: 0 Waited count: 197 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 376 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 1 Waited count: 5900 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 393 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 80 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 394 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 109 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 168 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22194065 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 419 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 58937 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 428 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 28 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 41 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 454 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5f799c48 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 476 (regionserver/1a7c5470b472:0.procedureResultReporter): State: WAITING Blocked count: 21 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b601671 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 478 (regionserver/1a7c5470b472:0.procedureResultReporter): State: WAITING Blocked count: 12 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23127d10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 475 (regionserver/1a7c5470b472:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@453ccd8e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 513 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 523 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 524 (region-location-0): State: WAITING Blocked count: 11 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b492bd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 529 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 406 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 58796 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 531 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 566 (ForkJoinPool.commonPool-worker-3): State: WAITING Blocked count: 0 Waited count: 660 Waiting on java.util.concurrent.ForkJoinPool@1dacc584 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 586 (region-location-1): State: WAITING Blocked count: 4 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b492bd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 587 (region-location-2): State: WAITING Blocked count: 7 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b492bd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 985 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1057 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1048 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1078 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 68 Waited count: 112 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@e152943 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1093 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1094 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1238 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1239 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1240 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1288 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1289 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1290 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1292 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1648 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 69 Waiting on java.util.TaskQueue@7c467346 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2011 (region-location-3): State: WAITING Blocked count: 3 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b492bd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2012 (region-location-4): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b492bd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2338 (ForkJoinPool.commonPool-worker-4): State: WAITING Blocked count: 0 Waited count: 584 Waiting on java.util.concurrent.ForkJoinPool@1dacc584 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 3081 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6704 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6705 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6706 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9171 (ForkJoinPool.commonPool-worker-7): State: TIMED_WAITING Blocked count: 0 Waited count: 234 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 11506 (AsyncFSWAL-1-hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/MasterData-prefix:1a7c5470b472,46159,1732381533870): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7bd19af Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11510 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 11511 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-11-23T17:15:33,181 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T17:15:35,015 DEBUG [master/1a7c5470b472:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=28, reuseRatio=73.68% 2024-11-23T17:15:35,015 DEBUG [master/1a7c5470b472:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-23T17:15:42,807 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:16:03,181 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;1a7c5470b472:46159 231 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 52 Waited count: 21 Waiting on java.lang.ref.ReferenceQueue$Lock@f99d861 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 22 Waited count: 28 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 33 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 2 Waited count: 34 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1871cd9b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 6578 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 66 Waiting on java.util.concurrent.CountDownLatch$Sync@751f8acd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12453 Waited count: 13193 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 15 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@22f5908a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@34fe8c5a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@58cc40a9): State: TIMED_WAITING Blocked count: 0 Waited count: 1311 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 132 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1861387285-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1861387285-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1861387285-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1861387285-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1861387285-41-acceptor-0@74a146ab-ServerConnector@649f03da{HTTP/1.1, (http/1.1)}{localhost:39301}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1861387285-42): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1861387285-43): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1861387285-44): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-61abf97f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 45 Waited count: 3669 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@687fc7f0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 35431): State: TIMED_WAITING Blocked count: 1 Waited 
count: 67 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 131 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@647b647f): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 219 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@86c698e): State: TIMED_WAITING Blocked count: 0 Waited count: 131 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 224 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 64768 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1838 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@572c6a67 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 35431): State: TIMED_WAITING Blocked count: 87 Waited count: 2902 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 35431): State: TIMED_WAITING Blocked count: 77 Waited count: 2914 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 35431): State: TIMED_WAITING Blocked count: 70 Waited count: 2887 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 35431): State: TIMED_WAITING Blocked count: 68 Waited count: 2906 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 35431): State: TIMED_WAITING Blocked count: 97 Waited count: 2894 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@6da51f12): State: TIMED_WAITING Blocked count: 0 Waited count: 328 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@5e705095): State: TIMED_WAITING Blocked count: 0 Waited count: 131 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@26af3662): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@43d016d): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(788830510)): State: TIMED_WAITING Blocked count: 0 Waited count: 23 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp795976251-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp795976251-87-acceptor-0@137ae5e2-ServerConnector@7147894f{HTTP/1.1, (http/1.1)}{localhost:44965}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp795976251-88): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp795976251-89): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-2ee2162-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4e5a1cf1): State: TIMED_WAITING Blocked count: 0 Waited count: 1308 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 39753): State: TIMED_WAITING Blocked count: 1 Waited count: 67 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 131 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 404 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@305ba803 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1942416587-172.17.0.2-1732381530223 heartbeating to localhost/127.0.0.1:35431): State: TIMED_WAITING Blocked count: 1576 Waited count: 1776 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ae688ac): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 39753): State: TIMED_WAITING Blocked count: 0 Waited count: 660 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 39753): State: TIMED_WAITING Blocked count: 0 Waited count: 664 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 39753): State: TIMED_WAITING Blocked count: 0 Waited count: 654 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 39753): State: TIMED_WAITING Blocked count: 0 Waited count: 657 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 39753): State: TIMED_WAITING Blocked count: 0 Waited count: 654 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp1421273556-119): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1421273556-120-acceptor-0@77b6f05c-ServerConnector@56952abb{HTTP/1.1, (http/1.1)}{localhost:38317}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1421273556-121): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1421273556-124): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-13de1d7e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (IPC Client (1065665691) connection to localhost/127.0.0.1:35431 from jenkins): State: TIMED_WAITING Blocked count: 1758 Waited count: 1759 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (IPC Parameter Sending Thread for localhost/127.0.0.1:35431): State: TIMED_WAITING Blocked count: 0 Waited count: 2526 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@65d9109d): State: TIMED_WAITING Blocked count: 0 Waited count: 1308 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 44465): State: TIMED_WAITING Blocked count: 1 Waited count: 67 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 131 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 1 Waited count: 391 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32d9e39e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1942416587-172.17.0.2-1732381530223 heartbeating to localhost/127.0.0.1:35431): State: TIMED_WAITING Blocked count: 1544 Waited count: 1762 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@660ae03b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 44465): State: TIMED_WAITING Blocked count: 0 Waited count: 654 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 44465): State: TIMED_WAITING Blocked count: 0 Waited count: 654 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 44465): State: TIMED_WAITING Blocked count: 0 Waited count: 659 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 44465): State: TIMED_WAITING Blocked count: 0 Waited count: 656 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 44465): State: TIMED_WAITING Blocked count: 0 Waited count: 659 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 155 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp334392358-156): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp334392358-157-acceptor-0@2053b94a-ServerConnector@1c0d7560{HTTP/1.1, (http/1.1)}{localhost:44953}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp334392358-158): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp334392358-159): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (Session-HouseKeeper-24bad932-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data3)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data4)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data1)): State: TIMED_WAITING Blocked count: 11 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data2)): State: 
TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 174 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data2/current/BP-1942416587-172.17.0.2-1732381530223): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data3/current/BP-1942416587-172.17.0.2-1732381530223): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 178 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data1/current/BP-1942416587-172.17.0.2-1732381530223): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data4/current/BP-1942416587-172.17.0.2-1732381530223): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 179 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@37aed54c): State: TIMED_WAITING Blocked count: 0 Waited count: 1307 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 181 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 182 (IPC Server idle connection scanner for port 45243): State: TIMED_WAITING Blocked count: 1 Waited count: 67 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 184 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 131 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@232492f7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (Command processor): State: WAITING Blocked count: 1 Waited count: 362 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3355b498 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 201 (BP-1942416587-172.17.0.2-1732381530223 heartbeating to localhost/127.0.0.1:35431): State: TIMED_WAITING Blocked count: 1531 Waited count: 1765 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15d798e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (java.util.concurrent.ThreadPoolExecutor$Worker@6a6171c7[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (pool-44-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ecc9509): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 183 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 180 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 208 (java.util.concurrent.ThreadPoolExecutor$Worker@570398a0[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (IPC Server handler 0 on default port 45243): State: TIMED_WAITING Blocked count: 0 Waited count: 674 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 210 (IPC Server handler 1 on default port 45243): State: TIMED_WAITING Blocked count: 0 Waited count: 659 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 2 on default port 45243): State: TIMED_WAITING Blocked count: 0 Waited count: 654 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 3 on default port 45243): State: TIMED_WAITING Blocked count: 0 Waited count: 692 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 4 on default port 45243): State: TIMED_WAITING Blocked count: 0 Waited count: 668 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 216 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 217 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data5/current/BP-1942416587-172.17.0.2-1732381530223): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data6/current/BP-1942416587-172.17.0.2-1732381530223): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2755bdc6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (java.util.concurrent.ThreadPoolExecutor$Worker@586e080b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 22 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:49631): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 66 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 328 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 12 Waited count: 401 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@63a2f446 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:49631):): State: WAITING Blocked count: 1 Waited count: 503 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3adfa618 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 550 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@30ee9708 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@5d93b81b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 1 Waited count: 465 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 42 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:49631)): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 7 Waited count: 60 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e29a85f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 5 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 22 Waited count: 78 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4c11587e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 5 Waited count: 102 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 5 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 0 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 1 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 0 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 1 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46159): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@c026287 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159): State: WAITING Blocked count: 105 Waited count: 470 Waiting on java.util.concurrent.Semaphore$NonfairSync@4f2c1c42 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159): State: WAITING Blocked count: 141 Waited count: 568 Waiting on java.util.concurrent.Semaphore$NonfairSync@5cd43ea8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46159): State: WAITING Blocked count: 103 Waited count: 8087 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1ac04fe4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46159): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5c91061 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46159): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5c91061 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=46159): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2aa8c296 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=46159): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@57749815 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=46159): State: WAITING Blocked count: 0 Waited count: 1 Waiting on 
java.util.concurrent.Semaphore$NonfairSync@4f535270 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=46159): State: WAITING Blocked count: 0 Waited count: 7 Waiting on java.util.concurrent.Semaphore$NonfairSync@2d409e3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6e04d75d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 310 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 330 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 105 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;1a7c5470b472:46159): State: TIMED_WAITING Blocked count: 12 Waited count: 4116 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1098/0x00007f167cf8e6f0.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) 
app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 351 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 66 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 353 (master/1a7c5470b472:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/1a7c5470b472:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (org.apache.hadoop.hdfs.PeerCache@510f36f4): State: TIMED_WAITING Blocked count: 0 Waited count: 217 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 376 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 1 
Waited count: 6499 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 393 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 80 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 394 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 109 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 168 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22194065 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 419 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 65 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 64938 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 428 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 28 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 41 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 454 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 12 
Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5f799c48 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 476 (regionserver/1a7c5470b472:0.procedureResultReporter): State: WAITING Blocked count: 21 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b601671 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 478 (regionserver/1a7c5470b472:0.procedureResultReporter): State: WAITING Blocked count: 12 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23127d10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 475 (regionserver/1a7c5470b472:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@453ccd8e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 513 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 523 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 524 (region-location-0): State: WAITING Blocked count: 11 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b492bd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 529 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 406 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 64798 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 531 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 566 (ForkJoinPool.commonPool-worker-3): State: TIMED_WAITING Blocked count: 0 Waited count: 661 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 586 (region-location-1): State: WAITING Blocked count: 4 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b492bd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 587 (region-location-2): State: WAITING Blocked count: 7 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b492bd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 985 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1063 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1048 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1078 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 68 Waited count: 112 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@e152943 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1093 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1094 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1238 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1239 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1240 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1288 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1289 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1290 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1292 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1648 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 69 Waiting on java.util.TaskQueue@7c467346 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2011 (region-location-3): State: WAITING Blocked count: 3 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b492bd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2012 (region-location-4): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b492bd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2338 (ForkJoinPool.commonPool-worker-4): State: WAITING Blocked count: 0 Waited count: 584 Waiting on java.util.concurrent.ForkJoinPool@1dacc584 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 3081 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6704 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6705 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6706 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11506 (AsyncFSWAL-1-hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/MasterData-prefix:1a7c5470b472,46159,1732381533870): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7bd19af Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11511 (process reaper): State: TIMED_WAITING Blocked 
count: 0 Waited count: 5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11514 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-11-23T17:16:33,182 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T17:17:03,182 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T17:17:25,959 DEBUG [M:0;1a7c5470b472:46159 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732381945944Disabling compacts and flushes for region at 1732381945944Disabling writes for close at 1732381945958 (+14 ms)Obtaining lock to block concurrent updates at 1732381945958Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732381945958Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=1031457, getHeapSize=1237248, getOffHeapSize=0, getCellsCount=2715 at 1732381945958Failed flush master:store,,1.1595e783b53d99cd5eef43b6debb2682., putting online again at 1732382245959 (+300001 ms) 2024-11-23T17:17:25,959 WARN [M:0;1a7c5470b472:46159 {}] region.MasterRegion(134): Failed to close region org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=4654, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1033) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=4654, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) ~[classes/:?] ... 19 more 2024-11-23T17:17:25,961 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(165): normal close failed, try recover java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.ensureWritable(int)" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.writeInt(FanOutOneBlockAsyncDFSOutput.java:391) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.lambda$writeWALTrailerAndMagic$3(AsyncProtobufLogWriter.java:247) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALMetadata(AsyncProtobufLogWriter.java:203) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALTrailerAndMagic(AsyncProtobufLogWriter.java:240) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter.writeWALTrailer(AbstractProtobufLogWriter.java:252) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:162) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T17:17:25,963 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-23T17:17:25,963 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-23T17:17:25,963 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/MasterData/WALs/1a7c5470b472,46159,1732381533870/1a7c5470b472%2C46159%2C1732381533870.1732381535619 2024-11-23T17:17:25,965 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/MasterData/WALs/1a7c5470b472,46159,1732381533870/1a7c5470b472%2C46159%2C1732381533870.1732381535619 after 0ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T17:17:25,965 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
java.io.InterruptedIOException: Operation cancelled at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.checkIfCancelled(RecoverLeaseFSUtils.java:269) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:159) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T17:17:25,965 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/MasterData/WALs/1a7c5470b472,46159,1732381533870/1a7c5470b472%2C46159%2C1732381533870.1732381535619 2024-11-23T17:17:25,965 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/MasterData/WALs/1a7c5470b472,46159,1732381533870/1a7c5470b472%2C46159%2C1732381533870.1732381535619 after 0ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;1a7c5470b472:46159 232 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 52 Waited count: 21 Waiting on java.lang.ref.ReferenceQueue$Lock@f99d861 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 22 Waited count: 29 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 36 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 2 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1871cd9b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 7178 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 72 Waiting on java.util.concurrent.CountDownLatch$Sync@261ccb64 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) 
app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12453 Waited count: 13194 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 15 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@22f5908a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@34fe8c5a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@58cc40a9): State: TIMED_WAITING Blocked count: 0 Waited count: 1431 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 144 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1861387285-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 
(qtp1861387285-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1861387285-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1861387285-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1861387285-41-acceptor-0@74a146ab-ServerConnector@649f03da{HTTP/1.1, (http/1.1)}{localhost:39301}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1861387285-42): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1861387285-43): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1861387285-44): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-61abf97f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 45 Waited count: 3669 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@687fc7f0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 35431): State: TIMED_WAITING Blocked count: 1 Waited count: 73 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 143 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@647b647f): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 239 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@86c698e): State: TIMED_WAITING Blocked count: 0 Waited count: 143 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 244 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) 
java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 70733 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1838 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@572c6a67 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 35431): State: TIMED_WAITING Blocked count: 87 Waited count: 2963 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 35431): State: TIMED_WAITING Blocked count: 77 Waited count: 2975 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 35431): State: TIMED_WAITING Blocked count: 70 Waited count: 2948 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 35431): State: TIMED_WAITING Blocked count: 68 Waited count: 2967 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 35431): State: TIMED_WAITING Blocked count: 97 Waited count: 2955 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@6da51f12): State: TIMED_WAITING Blocked count: 0 Waited count: 358 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@5e705095): State: TIMED_WAITING Blocked count: 0 Waited count: 143 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@26af3662): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@43d016d): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(788830510)): State: TIMED_WAITING Blocked count: 0 Waited count: 25 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp795976251-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp795976251-87-acceptor-0@137ae5e2-ServerConnector@7147894f{HTTP/1.1, (http/1.1)}{localhost:44965}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp795976251-88): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp795976251-89): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-2ee2162-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4e5a1cf1): State: TIMED_WAITING Blocked count: 0 Waited count: 1428 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 39753): State: TIMED_WAITING Blocked count: 1 Waited count: 73 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 143 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 1 Waited count: 424 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@305ba803 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-1942416587-172.17.0.2-1732381530223 heartbeating to localhost/127.0.0.1:35431): State: TIMED_WAITING Blocked count: 1596 Waited count: 1816 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ae688ac): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 39753): State: TIMED_WAITING Blocked count: 0 Waited count: 720 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 39753): State: TIMED_WAITING Blocked count: 0 Waited count: 724 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 39753): State: TIMED_WAITING Blocked count: 0 Waited count: 714 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 39753): State: TIMED_WAITING Blocked count: 0 Waited count: 717 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 39753): State: TIMED_WAITING Blocked count: 0 Waited count: 714 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp1421273556-119): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp1421273556-120-acceptor-0@77b6f05c-ServerConnector@56952abb{HTTP/1.1, (http/1.1)}{localhost:38317}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1421273556-121): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1421273556-124): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-13de1d7e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (IPC Client (1065665691) connection to localhost/127.0.0.1:35431 from jenkins): State: TIMED_WAITING Blocked count: 1818 Waited count: 1819 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (IPC Parameter Sending Thread for localhost/127.0.0.1:35431): State: TIMED_WAITING Blocked count: 0 Waited count: 2586 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@65d9109d): State: TIMED_WAITING Blocked count: 0 Waited count: 1428 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 44465): State: TIMED_WAITING Blocked count: 1 Waited count: 73 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 143 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 1 Waited count: 411 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32d9e39e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1942416587-172.17.0.2-1732381530223 heartbeating to localhost/127.0.0.1:35431): State: TIMED_WAITING Blocked count: 1564 Waited count: 1802 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@660ae03b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 44465): State: TIMED_WAITING Blocked count: 0 Waited count: 714 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 44465): State: TIMED_WAITING Blocked count: 0 Waited count: 714 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 44465): State: TIMED_WAITING Blocked count: 0 Waited count: 719 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 44465): State: TIMED_WAITING Blocked count: 0 Waited count: 716 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 44465): State: TIMED_WAITING Blocked count: 0 Waited count: 719 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 155 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp334392358-156): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f167c42b440.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp334392358-157-acceptor-0@2053b94a-ServerConnector@1c0d7560{HTTP/1.1, (http/1.1)}{localhost:44953}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp334392358-158): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp334392358-159): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (Session-HouseKeeper-24bad932-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data3)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data4)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data1)): 
State: TIMED_WAITING Blocked count: 11 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data2)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 174 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data2/current/BP-1942416587-172.17.0.2-1732381530223): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data3/current/BP-1942416587-172.17.0.2-1732381530223): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 178 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data1/current/BP-1942416587-172.17.0.2-1732381530223): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data4/current/BP-1942416587-172.17.0.2-1732381530223): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 179 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@37aed54c): State: TIMED_WAITING Blocked count: 0 Waited count: 1427 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 181 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 182 (IPC Server idle connection scanner for port 45243): State: TIMED_WAITING Blocked count: 1 Waited count: 73 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 184 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 143 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@232492f7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (Command processor): State: WAITING Blocked count: 1 Waited count: 382 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3355b498 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 201 (BP-1942416587-172.17.0.2-1732381530223 heartbeating to localhost/127.0.0.1:35431): State: TIMED_WAITING Blocked count: 1551 Waited count: 1805 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15d798e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (java.util.concurrent.ThreadPoolExecutor$Worker@6a6171c7[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (pool-44-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ecc9509): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 183 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 180 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 208 (java.util.concurrent.ThreadPoolExecutor$Worker@570398a0[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (IPC Server handler 0 on default port 45243): State: TIMED_WAITING Blocked count: 0 Waited count: 746 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 210 (IPC Server handler 1 on default port 45243): State: TIMED_WAITING Blocked count: 0 Waited count: 719 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 2 on default port 45243): State: TIMED_WAITING Blocked count: 0 Waited count: 714 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 3 on default port 45243): State: TIMED_WAITING Blocked count: 0 Waited count: 758 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 4 on default port 45243): State: TIMED_WAITING Blocked 
count: 0 Waited count: 728 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 216 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 217 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data5/current/BP-1942416587-172.17.0.2-1732381530223): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data6/current/BP-1942416587-172.17.0.2-1732381530223): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2755bdc6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (java.util.concurrent.ThreadPoolExecutor$Worker@586e080b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 24 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:49631): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: 
TIMED_WAITING Blocked count: 0 Waited count: 72 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 358 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 12 Waited count: 405 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@63a2f446 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:49631):): State: WAITING Blocked count: 1 Waited count: 507 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3adfa618 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 554 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@30ee9708 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@5d93b81b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 1 Waited count: 493 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 42 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:49631)): State: RUNNABLE 
Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 7 Waited count: 60 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e29a85f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 5 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 22 Waited count: 78 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4c11587e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 5 Waited count: 102 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 5 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 0 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 1 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 0 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 1 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@29e769a5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46159): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@c026287 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46159): State: WAITING Blocked count: 105 Waited count: 470 Waiting on java.util.concurrent.Semaphore$NonfairSync@4f2c1c42 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46159): State: WAITING Blocked count: 141 Waited count: 568 Waiting on java.util.concurrent.Semaphore$NonfairSync@5cd43ea8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46159): State: WAITING Blocked count: 103 Waited count: 8087 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1ac04fe4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46159): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5c91061 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46159): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5c91061 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=46159): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2aa8c296 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=46159): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@57749815 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=46159): 
State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4f535270 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=46159): State: WAITING Blocked count: 0 Waited count: 7 Waiting on java.util.concurrent.Semaphore$NonfairSync@2d409e3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6e04d75d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 310 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 330 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 105 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;1a7c5470b472:46159): State: TIMED_WAITING Blocked count: 12 Waited count: 4117 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1195) app//org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown0(AbstractFSWALProvider.java:162) app//org.apache.hadoop.hbase.wal.AbstractWALProvider$$Lambda$1433/0x00007f167d2546d8.run(Unknown Source) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.cleanup(AbstractWALProvider.java:287) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.shutdown(AbstractWALProvider.java:299) app//org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:341) app//org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140) app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206) app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) 
app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 351 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 72 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 353 (master/1a7c5470b472:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/1a7c5470b472:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (org.apache.hadoop.hdfs.PeerCache@510f36f4): State: TIMED_WAITING Blocked count: 0 Waited count: 237 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 376 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 1 Waited count: 7099 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 393 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 80 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 394 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 109 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 408 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 168 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22194065 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 419 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 71 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 407 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 70939 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 428 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 28 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 429 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 41 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 454 (RegionServerTracker-0): State: WAITING Blocked count: 9 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5f799c48 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 476 (regionserver/1a7c5470b472:0.procedureResultReporter): State: WAITING Blocked count: 21 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b601671 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 478 (regionserver/1a7c5470b472:0.procedureResultReporter): State: WAITING Blocked count: 12 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23127d10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 475 (regionserver/1a7c5470b472:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@453ccd8e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 513 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 523 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 524 (region-location-0): State: WAITING Blocked count: 11 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b492bd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 529 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 406 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 70799 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 531 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 586 (region-location-1): State: WAITING Blocked count: 4 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b492bd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 587 (region-location-2): State: WAITING Blocked count: 7 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b492bd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 985 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1069 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1048 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1078 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1092 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 68 Waited count: 112 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@e152943 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1093 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1094 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1238 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1239 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1240 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1288 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1289 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1290 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1292 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1648 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 69 Waiting on java.util.TaskQueue@7c467346 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2011 (region-location-3): State: WAITING Blocked count: 3 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b492bd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2012 (region-location-4): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56b492bd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2338 (ForkJoinPool.commonPool-worker-4): State: TIMED_WAITING Blocked count: 0 Waited count: 585 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 3081 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6704 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6705 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 6706 (RPCClient-NioEventLoopGroup-6-16):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 11506 (AsyncFSWAL-1-hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/MasterData-prefix:1a7c5470b472,46159,1732381533870):
  State: WAITING
  Blocked count: 0
  Waited count: 2
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7bd19af
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 11511 (process reaper):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 6
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 11514 (Timer for 'JobHistoryServer' metrics system):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 11
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 11519 (WAL-Shutdown-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1464)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doShutdown(AbstractFSWAL.java:2117)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1179)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1174)
    java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 11520 (Close-WAL-Writer-0):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:166)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1420/0x00007f167d24ab20.run(Unknown Source)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
2024-11-23T17:17:29,966 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=1 on file=hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/MasterData/WALs/1a7c5470b472,46159,1732381533870/1a7c5470b472%2C46159%2C1732381533870.1732381535619 after 4001ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T17:17:30,961 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.async.wait.on.shutdown.seconds"
2024-11-23T17:17:30,961 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-23T17:17:30,962 INFO [M:0;1a7c5470b472:46159 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-23T17:17:30,962 INFO [M:0;1a7c5470b472:46159 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46159
2024-11-23T17:17:30,962 INFO [M:0;1a7c5470b472:46159 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-23T17:17:30,967 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35431/user/jenkins/test-data/7be4c23b-138c-a557-f92a-f14685b2fc33/MasterData/WALs/1a7c5470b472,46159,1732381533870/1a7c5470b472%2C46159%2C1732381533870.1732381535619
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 12 more
2024-11-23T17:17:31,064 INFO [M:0;1a7c5470b472:46159 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-23T17:17:31,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-23T17:17:31,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46159-0x10145ca4ecf0000, quorum=127.0.0.1:49631, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-23T17:17:31,067 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@367f3488{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-23T17:17:31,067 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1c0d7560{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-23T17:17:31,067 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-23T17:17:31,068 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59eae107{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-23T17:17:31,068 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6268d40{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop.log.dir/,STOPPED}
2024-11-23T17:17:31,069 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-23T17:17:31,069 WARN [BP-1942416587-172.17.0.2-1732381530223 heartbeating to localhost/127.0.0.1:35431 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-23T17:17:31,069 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-23T17:17:31,069 WARN [BP-1942416587-172.17.0.2-1732381530223 heartbeating to localhost/127.0.0.1:35431 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1942416587-172.17.0.2-1732381530223 (Datanode Uuid f9f70e51-565f-4cc4-9aa8-ecb1ccfe8fa1) service to localhost/127.0.0.1:35431
2024-11-23T17:17:31,071 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data5/current/BP-1942416587-172.17.0.2-1732381530223 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-23T17:17:31,071 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data6/current/BP-1942416587-172.17.0.2-1732381530223 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-23T17:17:31,071 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-23T17:17:31,073 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6b9a68a9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-23T17:17:31,073 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@56952abb{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-23T17:17:31,073 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-23T17:17:31,074 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@76645274{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-23T17:17:31,074 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16f22913{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop.log.dir/,STOPPED}
2024-11-23T17:17:31,075 WARN [BP-1942416587-172.17.0.2-1732381530223 heartbeating to localhost/127.0.0.1:35431 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-23T17:17:31,075 WARN [BP-1942416587-172.17.0.2-1732381530223 heartbeating to localhost/127.0.0.1:35431 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1942416587-172.17.0.2-1732381530223 (Datanode Uuid ea1be133-7f2f-4b36-bc4f-bacb5152283b) service to localhost/127.0.0.1:35431
2024-11-23T17:17:31,075 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-23T17:17:31,075 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-23T17:17:31,076 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data3/current/BP-1942416587-172.17.0.2-1732381530223 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-23T17:17:31,076 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data4/current/BP-1942416587-172.17.0.2-1732381530223 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-23T17:17:31,076 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-23T17:17:31,078 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@64986ec5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-23T17:17:31,078 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7147894f{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-23T17:17:31,078 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-23T17:17:31,078 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@726dcf3b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-23T17:17:31,079 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ab3224e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop.log.dir/,STOPPED}
2024-11-23T17:17:31,079 WARN [BP-1942416587-172.17.0.2-1732381530223 heartbeating to localhost/127.0.0.1:35431 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-23T17:17:31,079 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-23T17:17:31,079 WARN [BP-1942416587-172.17.0.2-1732381530223 heartbeating to localhost/127.0.0.1:35431 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1942416587-172.17.0.2-1732381530223 (Datanode Uuid dbe6d323-d458-4fd8-9c0e-1b05c6e6f31c) service to localhost/127.0.0.1:35431
2024-11-23T17:17:31,079 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-23T17:17:31,080 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data1/current/BP-1942416587-172.17.0.2-1732381530223 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-23T17:17:31,080 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/cluster_de4eb8d5-d9e0-c7cf-9c55-240aeab3f534/data/data2/current/BP-1942416587-172.17.0.2-1732381530223 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-23T17:17:31,080 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-23T17:17:31,087 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5996a1b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-23T17:17:31,087 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@649f03da{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-23T17:17:31,087 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-23T17:17:31,087 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@af8acfe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-23T17:17:31,087 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@186b7ee9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/4eb8d703-ed27-10c4-fcfd-b0418582a204/hadoop.log.dir/,STOPPED}
2024-11-23T17:17:31,100 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-23T17:17:31,324 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down