2024-12-07 17:22:37,289 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@4b8d604b 2024-12-07 17:22:37,313 main DEBUG Took 0.020264 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-07 17:22:37,314 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-07 17:22:37,314 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-07 17:22:37,316 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-07 17:22:37,319 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 17:22:37,334 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-07 17:22:37,364 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 17:22:37,368 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 17:22:37,370 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 17:22:37,370 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 17:22:37,371 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 17:22:37,372 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 17:22:37,373 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 17:22:37,373 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 17:22:37,374 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 17:22:37,374 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 17:22:37,376 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 17:22:37,381 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 17:22:37,382 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 17:22:37,382 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-07 17:22:37,383 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 17:22:37,383 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 17:22:37,384 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 17:22:37,384 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 17:22:37,385 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 17:22:37,386 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 17:22:37,386 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 17:22:37,387 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 17:22:37,387 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 17:22:37,388 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 17:22:37,389 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 17:22:37,389 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-07 17:22:37,391 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 17:22:37,395 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-07 17:22:37,398 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-07 17:22:37,406 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
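
The entries above show Log4j2's PropertiesConfiguration assembling the per-package logger levels and the "INFO,Console" root logger from the test jar's log4j2.properties; the entries that follow build the Console appender and its pattern layout. A minimal log4j2.properties sketch consistent with those levels and with the pattern logged below is shown here; it substitutes the stock Console appender type for HBase's internal HBaseTestAppender (an assumption) and lists only a few of the loggers.

    # Sketch only: levels and pattern taken from the surrounding log entries;
    # the real test file uses HBase's HBaseTestAppender, a plain Console
    # appender is substituted here as an approximation.
    appender.console.type = Console
    appender.console.name = Console
    appender.console.target = SYSTEM_ERR
    appender.console.layout.type = PatternLayout
    appender.console.layout.pattern = %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n

    rootLogger.level = INFO
    rootLogger.appenderRef.console.ref = Console

    logger.hbase.name = org.apache.hadoop.hbase
    logger.hbase.level = DEBUG

    logger.zookeeper.name = org.apache.zookeeper
    logger.zookeeper.level = ERROR

    logger.directory.name = org.apache.directory
    logger.directory.level = WARN
    logger.directory.additivity = false
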
2024-12-07 17:22:37,416 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-07 17:22:37,417 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-07 17:22:37,440 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-07 17:22:37,461 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-07 17:22:37,464 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-07 17:22:37,465 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-07 17:22:37,465 main DEBUG createAppenders(={Console}) 2024-12-07 17:22:37,466 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@4b8d604b initialized 2024-12-07 17:22:37,467 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@4b8d604b 2024-12-07 17:22:37,467 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@4b8d604b OK. 2024-12-07 17:22:37,468 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-07 17:22:37,469 main DEBUG OutputStream closed 2024-12-07 17:22:37,469 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-07 17:22:37,469 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-07 17:22:37,470 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@37d4349f OK 2024-12-07 17:22:37,626 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-07 17:22:37,630 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-07 17:22:37,631 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-07 17:22:37,632 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-07 17:22:37,633 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-07 17:22:37,633 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-07 17:22:37,634 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-07 17:22:37,634 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-07 17:22:37,635 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-07 17:22:37,635 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-07 17:22:37,636 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-07 17:22:37,637 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-07 17:22:37,637 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-07 17:22:37,638 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-07 17:22:37,638 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-07 17:22:37,639 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-07 17:22:37,639 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-07 17:22:37,640 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-07 17:22:37,643 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-07 17:22:37,644 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@5812f68b) with optional ClassLoader: null 2024-12-07 17:22:37,644 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-07 17:22:37,652 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@5812f68b] started OK. 2024-12-07T17:22:37,674 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins 2024-12-07 17:22:37,677 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-07 17:22:37,678 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-07T17:22:38,319 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d 2024-12-07T17:22:38,320 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestSecureExportSnapshot timeout: 13 mins 2024-12-07T17:22:38,395 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable 2024-12-07T17:22:38,661 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-07T17:22:38,662 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16, deleteOnExit=true 2024-12-07T17:22:38,663 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-07T17:22:38,664 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/test.cache.data in system properties and HBase conf 2024-12-07T17:22:38,664 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop.tmp.dir in system properties and HBase conf 2024-12-07T17:22:38,665 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop.log.dir in system properties and HBase conf 2024-12-07T17:22:38,666 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-07T17:22:38,666 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-07T17:22:38,666 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-07T17:22:38,773 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-07T17:22:38,777 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-07T17:22:38,778 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-07T17:22:38,779 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-07T17:22:38,780 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T17:22:38,780 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-07T17:22:38,781 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-07T17:22:38,782 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T17:22:38,782 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T17:22:38,783 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-07T17:22:38,784 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/nfs.dump.dir in system properties and HBase conf 2024-12-07T17:22:38,784 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/java.io.tmpdir in system properties and HBase conf 2024-12-07T17:22:38,785 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T17:22:38,785 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-07T17:22:38,786 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-07T17:22:39,949 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-07T17:22:40,054 INFO [Time-limited test {}] log.Log(170): Logging initialized @4129ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-07T17:22:40,177 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T17:22:40,316 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T17:22:40,467 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T17:22:40,484 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T17:22:40,486 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T17:22:40,516 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T17:22:40,521 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a82d853{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop.log.dir/,AVAILABLE} 2024-12-07T17:22:40,524 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@343317a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T17:22:40,847 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7883a2cb{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/java.io.tmpdir/jetty-localhost-44973-hadoop-hdfs-3_4_1-tests_jar-_-any-16213297837115432908/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T17:22:40,858 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2d3d9b09{HTTP/1.1, (http/1.1)}{localhost:44973} 2024-12-07T17:22:40,859 INFO [Time-limited test {}] server.Server(415): Started @4935ms 2024-12-07T17:22:41,329 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T17:22:41,338 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T17:22:41,341 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T17:22:41,341 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T17:22:41,341 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T17:22:41,343 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d5648e9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop.log.dir/,AVAILABLE} 2024-12-07T17:22:41,344 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@231bb61b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T17:22:41,518 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@133e419e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/java.io.tmpdir/jetty-localhost-46687-hadoop-hdfs-3_4_1-tests_jar-_-any-2593686605094655972/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T17:22:41,519 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@69faf5ec{HTTP/1.1, (http/1.1)}{localhost:46687} 2024-12-07T17:22:41,520 INFO [Time-limited test {}] server.Server(415): Started @5596ms 2024-12-07T17:22:41,621 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T17:22:41,841 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T17:22:41,872 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T17:22:41,894 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T17:22:41,894 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T17:22:41,894 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T17:22:41,898 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@a110049{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop.log.dir/,AVAILABLE} 2024-12-07T17:22:41,900 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@96ca12c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T17:22:42,088 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4f5b39a8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/java.io.tmpdir/jetty-localhost-33865-hadoop-hdfs-3_4_1-tests_jar-_-any-8280694021379214887/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T17:22:42,090 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7ad86901{HTTP/1.1, (http/1.1)}{localhost:33865} 2024-12-07T17:22:42,090 INFO [Time-limited test {}] server.Server(415): Started @6166ms 2024-12-07T17:22:42,096 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T17:22:42,300 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T17:22:42,333 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T17:22:42,417 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data4/current/BP-719710545-172.17.0.2-1733592159624/current, will proceed with Du for space computation calculation, 2024-12-07T17:22:42,417 WARN [Thread-105 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data3/current/BP-719710545-172.17.0.2-1733592159624/current, will proceed with Du for space computation calculation, 2024-12-07T17:22:42,418 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data2/current/BP-719710545-172.17.0.2-1733592159624/current, will proceed with Du for space computation calculation, 2024-12-07T17:22:42,418 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T17:22:42,418 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T17:22:42,419 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T17:22:42,417 WARN [Thread-104 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data1/current/BP-719710545-172.17.0.2-1733592159624/current, will proceed with Du for space computation calculation, 2024-12-07T17:22:42,433 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7f4d5ab4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop.log.dir/,AVAILABLE} 2024-12-07T17:22:42,434 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69e2404e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T17:22:42,608 WARN [Thread-82 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T17:22:42,614 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T17:22:42,626 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@767cd31a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/java.io.tmpdir/jetty-localhost-33521-hadoop-hdfs-3_4_1-tests_jar-_-any-13332386717768055919/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T17:22:42,630 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@950ceea{HTTP/1.1, (http/1.1)}{localhost:33521} 2024-12-07T17:22:42,630 INFO [Time-limited test {}] server.Server(415): Started @6706ms 2024-12-07T17:22:42,636 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T17:22:42,725 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x12b28e7b4dd31144 with lease ID 0xd012592c3c5a44c5: Processing first storage report for DS-958cba15-3134-4320-a3da-a10e5b18094b from datanode DatanodeRegistration(127.0.0.1:45837, datanodeUuid=49b33b62-4bc1-4f0f-8c4e-a12d87d39afa, infoPort=46823, infoSecurePort=0, ipcPort=46497, storageInfo=lv=-57;cid=testClusterID;nsid=1177983154;c=1733592159624) 2024-12-07T17:22:42,727 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x12b28e7b4dd31144 with lease ID 0xd012592c3c5a44c5: from storage DS-958cba15-3134-4320-a3da-a10e5b18094b node DatanodeRegistration(127.0.0.1:45837, datanodeUuid=49b33b62-4bc1-4f0f-8c4e-a12d87d39afa, infoPort=46823, infoSecurePort=0, ipcPort=46497, storageInfo=lv=-57;cid=testClusterID;nsid=1177983154;c=1733592159624), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-07T17:22:42,728 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc23107e0ac64fa5 with lease ID 0xd012592c3c5a44c4: Processing first storage report for DS-4a91c7f3-67c1-4bee-8962-5132d6578b91 from datanode DatanodeRegistration(127.0.0.1:33595, datanodeUuid=84e8f957-34ea-4a56-9f59-ce7d0ef2643a, infoPort=36071, infoSecurePort=0, ipcPort=34995, storageInfo=lv=-57;cid=testClusterID;nsid=1177983154;c=1733592159624) 2024-12-07T17:22:42,728 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc23107e0ac64fa5 with lease ID 0xd012592c3c5a44c4: from storage DS-4a91c7f3-67c1-4bee-8962-5132d6578b91 node DatanodeRegistration(127.0.0.1:33595, datanodeUuid=84e8f957-34ea-4a56-9f59-ce7d0ef2643a, infoPort=36071, infoSecurePort=0, ipcPort=34995, storageInfo=lv=-57;cid=testClusterID;nsid=1177983154;c=1733592159624), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-07T17:22:42,728 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x12b28e7b4dd31144 with lease ID 0xd012592c3c5a44c5: Processing first storage report for DS-9af47591-e841-45cb-99a4-32b615808aad from datanode DatanodeRegistration(127.0.0.1:45837, datanodeUuid=49b33b62-4bc1-4f0f-8c4e-a12d87d39afa, infoPort=46823, infoSecurePort=0, ipcPort=46497, storageInfo=lv=-57;cid=testClusterID;nsid=1177983154;c=1733592159624) 2024-12-07T17:22:42,729 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x12b28e7b4dd31144 with lease 
ID 0xd012592c3c5a44c5: from storage DS-9af47591-e841-45cb-99a4-32b615808aad node DatanodeRegistration(127.0.0.1:45837, datanodeUuid=49b33b62-4bc1-4f0f-8c4e-a12d87d39afa, infoPort=46823, infoSecurePort=0, ipcPort=46497, storageInfo=lv=-57;cid=testClusterID;nsid=1177983154;c=1733592159624), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T17:22:42,729 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc23107e0ac64fa5 with lease ID 0xd012592c3c5a44c4: Processing first storage report for DS-ea2f0e47-0ccd-4f44-a079-333258b08986 from datanode DatanodeRegistration(127.0.0.1:33595, datanodeUuid=84e8f957-34ea-4a56-9f59-ce7d0ef2643a, infoPort=36071, infoSecurePort=0, ipcPort=34995, storageInfo=lv=-57;cid=testClusterID;nsid=1177983154;c=1733592159624) 2024-12-07T17:22:42,729 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc23107e0ac64fa5 with lease ID 0xd012592c3c5a44c4: from storage DS-ea2f0e47-0ccd-4f44-a079-333258b08986 node DatanodeRegistration(127.0.0.1:33595, datanodeUuid=84e8f957-34ea-4a56-9f59-ce7d0ef2643a, infoPort=36071, infoSecurePort=0, ipcPort=34995, storageInfo=lv=-57;cid=testClusterID;nsid=1177983154;c=1733592159624), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T17:22:42,864 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data6/current/BP-719710545-172.17.0.2-1733592159624/current, will proceed with Du for space computation calculation, 2024-12-07T17:22:42,866 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data5/current/BP-719710545-172.17.0.2-1733592159624/current, will proceed with Du for space computation calculation, 2024-12-07T17:22:42,913 WARN [Thread-129 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T17:22:42,921 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xca6bef6198ff758b with lease ID 0xd012592c3c5a44c6: Processing first storage report for DS-dcd6d0fb-0560-48ee-91f4-03ad18ceda65 from datanode DatanodeRegistration(127.0.0.1:33165, datanodeUuid=846535b3-9b1c-4b8c-b76b-292d5a3f9bc2, infoPort=43531, infoSecurePort=0, ipcPort=44011, storageInfo=lv=-57;cid=testClusterID;nsid=1177983154;c=1733592159624) 2024-12-07T17:22:42,922 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xca6bef6198ff758b with lease ID 0xd012592c3c5a44c6: from storage DS-dcd6d0fb-0560-48ee-91f4-03ad18ceda65 node DatanodeRegistration(127.0.0.1:33165, datanodeUuid=846535b3-9b1c-4b8c-b76b-292d5a3f9bc2, infoPort=43531, infoSecurePort=0, ipcPort=44011, storageInfo=lv=-57;cid=testClusterID;nsid=1177983154;c=1733592159624), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T17:22:42,922 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xca6bef6198ff758b with lease ID 0xd012592c3c5a44c6: Processing first storage report for DS-fbe6d707-cba3-4521-a38e-41e893389c68 from datanode DatanodeRegistration(127.0.0.1:33165, datanodeUuid=846535b3-9b1c-4b8c-b76b-292d5a3f9bc2, infoPort=43531, infoSecurePort=0, ipcPort=44011, storageInfo=lv=-57;cid=testClusterID;nsid=1177983154;c=1733592159624) 2024-12-07T17:22:42,923 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xca6bef6198ff758b with lease ID 0xd012592c3c5a44c6: from storage DS-fbe6d707-cba3-4521-a38e-41e893389c68 node DatanodeRegistration(127.0.0.1:33165, datanodeUuid=846535b3-9b1c-4b8c-b76b-292d5a3f9bc2, infoPort=43531, infoSecurePort=0, ipcPort=44011, storageInfo=lv=-57;cid=testClusterID;nsid=1177983154;c=1733592159624), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T17:22:43,253 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d 2024-12-07T17:22:43,380 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/zookeeper_0, clientPort=58133, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-07T17:22:43,397 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=58133 2024-12-07T17:22:43,413 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
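
By this point the mini DFS (three datanodes) and the MiniZooKeeperCluster on client port 58133 are up, matching the StartMiniClusterOption{numMasters=1, numRegionServers=3, numDataNodes=3, numZkServers=1} logged earlier. A hedged sketch of the test-side call that produces such a startup, assuming the standard branch-2 HBaseTestingUtility API, looks like this:

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterStartupSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        // Option values mirror the StartMiniClusterOption printed in the log above.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(3)
            .numDataNodes(3)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option);   // brings up mini DFS, ZooKeeper and HBase
        try {
          // test body would run against util.getConnection() here
        } finally {
          util.shutdownMiniCluster();    // tears down the cluster and its temp dirs
        }
      }
    }
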
2024-12-07T17:22:43,416 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T17:22:43,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741825_1001 (size=7) 2024-12-07T17:22:43,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741825_1001 (size=7) 2024-12-07T17:22:43,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741825_1001 (size=7) 2024-12-07T17:22:44,123 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc with version=8 2024-12-07T17:22:44,123 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/hbase-staging 2024-12-07T17:22:44,294 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-07T17:22:44,568 INFO [Time-limited test {}] client.ConnectionUtils(129): master/5c70517e8267:0 server-side Connection retries=45 2024-12-07T17:22:44,589 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T17:22:44,589 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T17:22:44,590 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T17:22:44,590 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T17:22:44,590 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T17:22:44,754 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T17:22:44,866 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-07T17:22:44,879 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-07T17:22:44,885 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T17:22:44,926 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 14684 (auto-detected) 2024-12-07T17:22:44,928 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 
02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-07T17:22:44,958 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:36895 2024-12-07T17:22:44,972 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T17:22:44,978 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T17:22:45,001 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:36895 connecting to ZooKeeper ensemble=127.0.0.1:58133 2024-12-07T17:22:45,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:368950x0, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T17:22:45,060 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36895-0x1006d80dcf90000 connected 2024-12-07T17:22:45,126 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T17:22:45,129 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T17:22:45,146 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T17:22:45,160 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36895 2024-12-07T17:22:45,165 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36895 2024-12-07T17:22:45,168 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36895 2024-12-07T17:22:45,171 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36895 2024-12-07T17:22:45,171 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36895 2024-12-07T17:22:45,184 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc, hbase.cluster.distributed=false 2024-12-07T17:22:45,268 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/5c70517e8267:0 server-side Connection retries=45 2024-12-07T17:22:45,268 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T17:22:45,269 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T17:22:45,269 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 
2024-12-07T17:22:45,269 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T17:22:45,269 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T17:22:45,273 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T17:22:45,276 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T17:22:45,278 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:41313 2024-12-07T17:22:45,281 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T17:22:45,292 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T17:22:45,294 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T17:22:45,297 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T17:22:45,302 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:41313 connecting to ZooKeeper ensemble=127.0.0.1:58133 2024-12-07T17:22:45,312 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:413130x0, quorum=127.0.0.1:58133, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T17:22:45,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:413130x0, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T17:22:45,314 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41313-0x1006d80dcf90001 connected 2024-12-07T17:22:45,315 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T17:22:45,317 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T17:22:45,326 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41313 2024-12-07T17:22:45,328 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41313 2024-12-07T17:22:45,331 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41313 2024-12-07T17:22:45,333 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41313 
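
The master startup entries earlier report hbase.rootdir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc and hbase.cluster.distributed=false, and each RpcExecutor above is instantiated with handlerCount=3 and maxQueueLength=30. A rough sketch of the configuration behind those values follows; tying handlerCount to hbase.regionserver.handler.count and the queue length to hbase.ipc.server.max.callqueue.length is an assumption on my part, not something the log states.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MiniClusterConfSketch {
      // Sketch of the settings implied by the surrounding entries. The rootdir and
      // hbase.cluster.distributed values are read straight off the log; mapping
      // handlerCount=3 / maxQueueLength=30 to the two handler keys is an assumption.
      static Configuration testConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.rootdir",
            "hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc");
        conf.setBoolean("hbase.cluster.distributed", false);
        conf.setInt("hbase.regionserver.handler.count", 3);
        conf.setInt("hbase.ipc.server.max.callqueue.length", 30);
        return conf;
      }
    }
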
2024-12-07T17:22:45,334 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41313 2024-12-07T17:22:45,362 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/5c70517e8267:0 server-side Connection retries=45 2024-12-07T17:22:45,363 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T17:22:45,363 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T17:22:45,364 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T17:22:45,364 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T17:22:45,364 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T17:22:45,365 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T17:22:45,365 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T17:22:45,372 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:38423 2024-12-07T17:22:45,373 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T17:22:45,400 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T17:22:45,401 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T17:22:45,404 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T17:22:45,408 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:38423 connecting to ZooKeeper ensemble=127.0.0.1:58133 2024-12-07T17:22:45,419 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:384230x0, quorum=127.0.0.1:58133, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T17:22:45,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:384230x0, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T17:22:45,422 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:384230x0, quorum=127.0.0.1:58133, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T17:22:45,427 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:384230x0, 
quorum=127.0.0.1:58133, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T17:22:45,428 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38423-0x1006d80dcf90002 connected 2024-12-07T17:22:45,440 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38423 2024-12-07T17:22:45,440 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38423 2024-12-07T17:22:45,449 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38423 2024-12-07T17:22:45,451 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38423 2024-12-07T17:22:45,459 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38423 2024-12-07T17:22:45,483 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/5c70517e8267:0 server-side Connection retries=45 2024-12-07T17:22:45,486 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T17:22:45,487 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T17:22:45,487 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T17:22:45,487 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T17:22:45,487 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T17:22:45,487 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T17:22:45,487 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T17:22:45,491 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:36163 2024-12-07T17:22:45,492 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T17:22:45,497 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T17:22:45,498 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T17:22:45,502 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
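
With the master (port 36895) and the region servers registering sessions against the ZooKeeper ensemble at 127.0.0.1:58133, a test client reaches the cluster through that same quorum. A minimal, hypothetical client sketch using the standard HBase client API (quorum host and port taken from the "connecting to ZooKeeper ensemble" entries above):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class MiniClusterClientSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "58133");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // e.g. list the cluster's tables once the master has finished initializing
          admin.listTableNames();
        }
      }
    }
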
2024-12-07T17:22:45,507 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:36163 connecting to ZooKeeper ensemble=127.0.0.1:58133 2024-12-07T17:22:45,512 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:361630x0, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T17:22:45,513 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:361630x0, quorum=127.0.0.1:58133, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T17:22:45,515 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:361630x0, quorum=127.0.0.1:58133, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T17:22:45,516 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:361630x0, quorum=127.0.0.1:58133, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T17:22:45,524 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36163-0x1006d80dcf90003 connected 2024-12-07T17:22:45,526 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36163 2024-12-07T17:22:45,528 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36163 2024-12-07T17:22:45,532 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36163 2024-12-07T17:22:45,534 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36163 2024-12-07T17:22:45,535 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36163 2024-12-07T17:22:45,540 INFO [master/5c70517e8267:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/5c70517e8267,36895,1733592164285 2024-12-07T17:22:45,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T17:22:45,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T17:22:45,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T17:22:45,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T17:22:45,554 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/5c70517e8267,36895,1733592164285 2024-12-07T17:22:45,564 DEBUG [M:0;5c70517e8267:36895 {}] 
regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5c70517e8267:36895 2024-12-07T17:22:45,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T17:22:45,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T17:22:45,586 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T17:22:45,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:22:45,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:22:45,588 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:22:45,588 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T17:22:45,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:22:45,590 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-07T17:22:45,591 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-07T17:22:45,592 INFO [master/5c70517e8267:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/5c70517e8267,36895,1733592164285 from backup master directory 2024-12-07T17:22:45,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T17:22:45,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/5c70517e8267,36895,1733592164285 2024-12-07T17:22:45,596 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters 2024-12-07T17:22:45,596 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T17:22:45,596 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T17:22:45,597 WARN [master/5c70517e8267:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T17:22:45,597 INFO [master/5c70517e8267:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5c70517e8267,36895,1733592164285 2024-12-07T17:22:45,601 INFO [master/5c70517e8267:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-07T17:22:45,604 INFO [master/5c70517e8267:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-07T17:22:45,694 WARN [IPC Server handler 1 on default port 33765 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T17:22:45,695 WARN [IPC Server handler 1 on default port 33765 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T17:22:45,695 WARN [IPC Server handler 1 on default port 33765 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T17:22:45,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741826_1002 (size=42) 2024-12-07T17:22:45,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741826_1002 (size=42) 2024-12-07T17:22:45,719 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/hbase.id with ID: ad33cde4-86e9-45f1-8cc4-acf80efb5f84 2024-12-07T17:22:45,767 INFO [master/5c70517e8267:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T17:22:45,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:22:45,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:22:45,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:22:45,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:22:45,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741827_1003 (size=196) 2024-12-07T17:22:45,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741827_1003 (size=196) 2024-12-07T17:22:45,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741827_1003 (size=196) 2024-12-07T17:22:45,871 INFO [master/5c70517e8267:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T17:22:45,873 INFO [master/5c70517e8267:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-07T17:22:45,896 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 
java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
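The NoSuchMethodException above is logged at DEBUG because it is an expected probe result: the helper looks up decryptEncryptedDataEncryptionKey reflectively and, when it is absent, concludes (per the message) that it is running against a Hadoop version with HDFS-12396 and switches code paths. A minimal, generic sketch of that probe-and-fallback pattern, using java.lang.String.isBlank as a stand-in target rather than the actual HBase/Hadoop classes:

import java.lang.reflect.Method;

public class ReflectionProbeSketch {
    // Probe for a method that may or may not exist on the running version.
    static Method probe(Class<?> cls, String name, Class<?>... params) {
        try {
            return cls.getDeclaredMethod(name, params);
        } catch (NoSuchMethodException e) {
            // Absence is an expected outcome on some versions, so callers treat it
            // as a signal to pick a different code path, not as an error.
            return null;
        }
    }

    public static void main(String[] args) {
        Method m = probe(String.class, "isBlank"); // present on Java 11+, absent on Java 8
        System.out.println(m != null ? "new API available" : "fall back to older code path");
    }
}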
2024-12-07T17:22:45,902 INFO [master/5c70517e8267:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T17:22:46,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741828_1004 (size=1189) 2024-12-07T17:22:46,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741828_1004 (size=1189) 2024-12-07T17:22:46,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741828_1004 (size=1189) 2024-12-07T17:22:46,037 INFO [master/5c70517e8267:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/MasterData/data/master/store 2024-12-07T17:22:46,050 WARN [IPC Server handler 2 on default port 33765 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T17:22:46,050 WARN [IPC Server handler 2 on default port 33765 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T17:22:46,051 WARN [IPC Server handler 2 on default port 33765 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 
(unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T17:22:46,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741829_1005 (size=34) 2024-12-07T17:22:46,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741829_1005 (size=34) 2024-12-07T17:22:46,079 INFO [master/5c70517e8267:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-07T17:22:46,079 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:22:46,080 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T17:22:46,080 INFO [master/5c70517e8267:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T17:22:46,080 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T17:22:46,081 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T17:22:46,081 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T17:22:46,081 INFO [master/5c70517e8267:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-07T17:22:46,081 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-07T17:22:46,083 WARN [master/5c70517e8267:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/MasterData/data/master/store/.initializing 2024-12-07T17:22:46,083 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/MasterData/WALs/5c70517e8267,36895,1733592164285 2024-12-07T17:22:46,097 INFO [master/5c70517e8267:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T17:22:46,114 INFO [master/5c70517e8267:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5c70517e8267%2C36895%2C1733592164285, suffix=, logDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/MasterData/WALs/5c70517e8267,36895,1733592164285, archiveDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/MasterData/oldWALs, maxLogs=10 2024-12-07T17:22:46,139 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/MasterData/WALs/5c70517e8267,36895,1733592164285/5c70517e8267%2C36895%2C1733592164285.1733592166119, exclude list is [], retry=0 2024-12-07T17:22:46,143 WARN [IPC Server handler 4 on default port 33765 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T17:22:46,143 WARN [IPC Server handler 4 on default port 33765 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T17:22:46,143 WARN [IPC Server handler 4 on default port 33765 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T17:22:46,165 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33595,DS-4a91c7f3-67c1-4bee-8962-5132d6578b91,DISK] 2024-12-07T17:22:46,165 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 
127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33165,DS-dcd6d0fb-0560-48ee-91f4-03ad18ceda65,DISK] 2024-12-07T17:22:46,169 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-07T17:22:46,224 INFO [master/5c70517e8267:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/MasterData/WALs/5c70517e8267,36895,1733592164285/5c70517e8267%2C36895%2C1733592164285.1733592166119 2024-12-07T17:22:46,225 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43531:43531),(127.0.0.1/127.0.0.1:36071:36071)] 2024-12-07T17:22:46,225 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-07T17:22:46,226 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:22:46,230 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T17:22:46,231 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T17:22:46,284 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T17:22:46,320 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-07T17:22:46,326 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:22:46,331 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T17:22:46,332 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T17:22:46,341 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-07T17:22:46,342 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:22:46,344 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:22:46,346 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T17:22:46,355 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-07T17:22:46,356 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:22:46,358 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:22:46,359 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T17:22:46,363 
INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-07T17:22:46,363 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:22:46,364 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:22:46,369 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T17:22:46,370 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T17:22:46,380 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
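The 32.0 M figure in the FlushLargeStoresPolicy message is consistent with the flushSize=134217728 (128 MB) injected earlier for master:store divided evenly across its four column families (info, proc, rs, state), and matches the flushSizeLowerBound=33554432 reported when the region opens below. A quick arithmetic check:

public class FlushLowerBoundSketch {
    public static void main(String[] args) {
        // Inputs taken from the log above: 134217728-byte memstore flush size, 4 column families.
        long memstoreFlushSize = 134217728L;
        int columnFamilies = 4;
        long lowerBound = memstoreFlushSize / columnFamilies;           // 33554432
        System.out.println(lowerBound + " bytes = "
                + (lowerBound / (1024.0 * 1024.0)) + " MB");            // 32.0 MB, as logged
    }
}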
2024-12-07T17:22:46,385 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T17:22:46,392 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T17:22:46,394 INFO [master/5c70517e8267:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70691596, jitterRate=0.0533868670463562}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T17:22:46,401 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-07T17:22:46,402 INFO [master/5c70517e8267:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-07T17:22:46,455 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5218537, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:22:46,514 INFO [master/5c70517e8267:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-07T17:22:46,537 INFO [master/5c70517e8267:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-07T17:22:46,537 INFO [master/5c70517e8267:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-07T17:22:46,540 INFO [master/5c70517e8267:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-07T17:22:46,542 INFO [master/5c70517e8267:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-12-07T17:22:46,549 INFO [master/5c70517e8267:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 6 msec 2024-12-07T17:22:46,550 INFO [master/5c70517e8267:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-07T17:22:46,595 INFO [master/5c70517e8267:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-07T17:22:46,614 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-07T17:22:46,617 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-07T17:22:46,621 INFO [master/5c70517e8267:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-07T17:22:46,626 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-07T17:22:46,629 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-07T17:22:46,632 INFO [master/5c70517e8267:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-07T17:22:46,639 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-07T17:22:46,644 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-07T17:22:46,646 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-07T17:22:46,648 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-07T17:22:46,675 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-07T17:22:46,677 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-07T17:22:46,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T17:22:46,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T17:22:46,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T17:22:46,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-07T17:22:46,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:22:46,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:22:46,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:22:46,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:22:46,690 INFO [master/5c70517e8267:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=5c70517e8267,36895,1733592164285, sessionid=0x1006d80dcf90000, setting cluster-up flag (Was=false) 2024-12-07T17:22:46,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:22:46,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:22:46,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:22:46,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:22:46,718 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-07T17:22:46,719 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5c70517e8267,36895,1733592164285 2024-12-07T17:22:46,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:22:46,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:22:46,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:22:46,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:22:46,755 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-07T17:22:46,758 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5c70517e8267,36895,1733592164285 2024-12-07T17:22:46,897 DEBUG [RS:0;5c70517e8267:41313 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5c70517e8267:41313 2024-12-07T17:22:46,905 DEBUG [RS:2;5c70517e8267:36163 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;5c70517e8267:36163 2024-12-07T17:22:46,907 INFO [RS:0;5c70517e8267:41313 {}] regionserver.HRegionServer(1008): ClusterId : ad33cde4-86e9-45f1-8cc4-acf80efb5f84 2024-12-07T17:22:46,908 INFO [RS:2;5c70517e8267:36163 {}] regionserver.HRegionServer(1008): ClusterId : ad33cde4-86e9-45f1-8cc4-acf80efb5f84 2024-12-07T17:22:46,911 DEBUG [RS:0;5c70517e8267:41313 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T17:22:46,913 DEBUG [RS:1;5c70517e8267:38423 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;5c70517e8267:38423 2024-12-07T17:22:46,914 DEBUG [RS:2;5c70517e8267:36163 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T17:22:46,917 INFO [RS:1;5c70517e8267:38423 {}] regionserver.HRegionServer(1008): ClusterId : ad33cde4-86e9-45f1-8cc4-acf80efb5f84 2024-12-07T17:22:46,917 DEBUG [RS:1;5c70517e8267:38423 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T17:22:46,927 DEBUG [RS:0;5c70517e8267:41313 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T17:22:46,927 DEBUG [RS:0;5c70517e8267:41313 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T17:22:46,929 DEBUG [RS:1;5c70517e8267:38423 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T17:22:46,930 DEBUG [RS:1;5c70517e8267:38423 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T17:22:46,934 DEBUG [RS:2;5c70517e8267:36163 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T17:22:46,934 DEBUG [RS:2;5c70517e8267:36163 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T17:22:46,935 DEBUG [RS:0;5c70517e8267:41313 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T17:22:46,936 DEBUG [RS:0;5c70517e8267:41313 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3325cd63, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:22:46,940 DEBUG [RS:1;5c70517e8267:38423 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T17:22:46,940 DEBUG [RS:1;5c70517e8267:38423 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@197b644b, 
compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:22:46,942 DEBUG [RS:2;5c70517e8267:36163 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T17:22:46,943 DEBUG [RS:0;5c70517e8267:41313 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4369e8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5c70517e8267/172.17.0.2:0 2024-12-07T17:22:46,947 DEBUG [RS:2;5c70517e8267:36163 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14200fd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:22:46,948 DEBUG [RS:1;5c70517e8267:38423 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1bfd0ed9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5c70517e8267/172.17.0.2:0 2024-12-07T17:22:46,949 INFO [RS:0;5c70517e8267:41313 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-07T17:22:46,949 INFO [RS:1;5c70517e8267:38423 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-07T17:22:46,949 INFO [RS:0;5c70517e8267:41313 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-07T17:22:46,949 INFO [RS:1;5c70517e8267:38423 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-07T17:22:46,950 DEBUG [RS:2;5c70517e8267:36163 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b4c1132, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5c70517e8267/172.17.0.2:0 2024-12-07T17:22:46,950 INFO [RS:2;5c70517e8267:36163 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-07T17:22:46,950 INFO [RS:2;5c70517e8267:36163 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-07T17:22:46,952 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] master.HMaster(3390): Registered master coprocessor service: service=AccessControlService 2024-12-07T17:22:46,955 DEBUG [RS:2;5c70517e8267:36163 {}] regionserver.HRegionServer(900): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-07T17:22:46,955 DEBUG [RS:1;5c70517e8267:38423 {}] regionserver.HRegionServer(900): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-07T17:22:46,955 DEBUG [RS:0;5c70517e8267:41313 {}] regionserver.HRegionServer(900): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-07T17:22:46,956 INFO [RS:1;5c70517e8267:38423 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-07T17:22:46,956 DEBUG [RS:1;5c70517e8267:38423 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-07T17:22:46,956 INFO [RS:0;5c70517e8267:41313 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T17:22:46,957 DEBUG [RS:0;5c70517e8267:41313 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-07T17:22:46,957 INFO [RS:2;5c70517e8267:36163 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T17:22:46,957 DEBUG [RS:2;5c70517e8267:36163 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-07T17:22:46,958 INFO [master/5c70517e8267:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T17:22:46,958 INFO [master/5c70517e8267:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 2024-12-07T17:22:46,965 INFO [RS:1;5c70517e8267:38423 {}] regionserver.HRegionServer(3073): reportForDuty to master=5c70517e8267,36895,1733592164285 with isa=5c70517e8267/172.17.0.2:38423, startcode=1733592165361 2024-12-07T17:22:46,965 INFO [RS:2;5c70517e8267:36163 {}] regionserver.HRegionServer(3073): reportForDuty to master=5c70517e8267,36895,1733592164285 with isa=5c70517e8267/172.17.0.2:36163, startcode=1733592165482 2024-12-07T17:22:46,971 INFO [RS:0;5c70517e8267:41313 {}] regionserver.HRegionServer(3073): reportForDuty to master=5c70517e8267,36895,1733592164285 with isa=5c70517e8267/172.17.0.2:41313, startcode=1733592165267 2024-12-07T17:22:46,983 DEBUG [RS:0;5c70517e8267:41313 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T17:22:46,983 DEBUG [RS:2;5c70517e8267:36163 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T17:22:46,983 DEBUG [RS:1;5c70517e8267:38423 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T17:22:47,041 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-07T17:22:47,058 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56987, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T17:22:47,059 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34449, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T17:22:47,062 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33445, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T17:22:47,062 INFO [master/5c70517e8267:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-07T17:22:47,066 INFO [master/5c70517e8267:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, 
CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-07T17:22:47,067 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36895 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T17:22:47,074 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36895 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T17:22:47,075 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36895 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T17:22:47,074 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5c70517e8267,36895,1733592164285 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-07T17:22:47,082 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5c70517e8267:0, corePoolSize=5, maxPoolSize=5 2024-12-07T17:22:47,082 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5c70517e8267:0, corePoolSize=5, maxPoolSize=5 2024-12-07T17:22:47,083 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5c70517e8267:0, corePoolSize=5, maxPoolSize=5 2024-12-07T17:22:47,083 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5c70517e8267:0, corePoolSize=5, maxPoolSize=5 2024-12-07T17:22:47,083 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5c70517e8267:0, corePoolSize=10, maxPoolSize=10 2024-12-07T17:22:47,083 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5c70517e8267:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:22:47,083 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5c70517e8267:0, corePoolSize=2, maxPoolSize=2 2024-12-07T17:22:47,084 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5c70517e8267:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:22:47,091 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-07T17:22:47,091 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-07T17:22:47,099 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:22:47,099 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => 
{'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-07T17:22:47,109 DEBUG [RS:1;5c70517e8267:38423 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-07T17:22:47,109 DEBUG [RS:2;5c70517e8267:36163 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-07T17:22:47,109 WARN [RS:1;5c70517e8267:38423 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-07T17:22:47,109 WARN [RS:2;5c70517e8267:36163 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-07T17:22:47,109 DEBUG [RS:0;5c70517e8267:41313 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-07T17:22:47,109 WARN [RS:0;5c70517e8267:41313 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-07T17:22:47,116 INFO [master/5c70517e8267:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733592197116 2024-12-07T17:22:47,118 INFO [master/5c70517e8267:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-07T17:22:47,120 INFO [master/5c70517e8267:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-07T17:22:47,124 INFO [master/5c70517e8267:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-07T17:22:47,125 INFO [master/5c70517e8267:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-07T17:22:47,125 INFO [master/5c70517e8267:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-07T17:22:47,125 INFO [master/5c70517e8267:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-07T17:22:47,127 INFO [master/5c70517e8267:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-07T17:22:47,128 INFO [master/5c70517e8267:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-07T17:22:47,128 WARN [IPC Server handler 2 on default port 33765 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T17:22:47,128 WARN [IPC Server handler 2 on default port 33765 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T17:22:47,129 WARN [IPC Server handler 2 on default port 33765 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T17:22:47,130 INFO [master/5c70517e8267:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-07T17:22:47,130 INFO [master/5c70517e8267:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-07T17:22:47,136 INFO [master/5c70517e8267:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-07T17:22:47,136 INFO [master/5c70517e8267:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-07T17:22:47,144 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5c70517e8267:0:becomeActiveMaster-HFileCleaner.large.0-1733592167138,5,FailOnTimeoutGroup] 2024-12-07T17:22:47,152 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5c70517e8267:0:becomeActiveMaster-HFileCleaner.small.0-1733592167145,5,FailOnTimeoutGroup] 2024-12-07T17:22:47,152 INFO [master/5c70517e8267:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T17:22:47,152 INFO [master/5c70517e8267:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-07T17:22:47,154 INFO [master/5c70517e8267:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 
2024-12-07T17:22:47,155 INFO [master/5c70517e8267:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-07T17:22:47,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741831_1007 (size=1039) 2024-12-07T17:22:47,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741831_1007 (size=1039) 2024-12-07T17:22:47,169 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-07T17:22:47,170 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:22:47,185 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T17:22:47,185 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T17:22:47,185 WARN [RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], 
creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T17:22:47,186 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T17:22:47,186 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T17:22:47,186 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T17:22:47,186 WARN [RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T17:22:47,187 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T17:22:47,187 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T17:22:47,187 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T17:22:47,187 WARN [RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: 
expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T17:22:47,187 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T17:22:47,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741832_1008 (size=32) 2024-12-07T17:22:47,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741832_1008 (size=32) 2024-12-07T17:22:47,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741832_1008 (size=32) 2024-12-07T17:22:47,204 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:22:47,208 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T17:22:47,210 INFO [RS:2;5c70517e8267:36163 {}] regionserver.HRegionServer(3073): reportForDuty to master=5c70517e8267,36895,1733592164285 with isa=5c70517e8267/172.17.0.2:36163, startcode=1733592165482 2024-12-07T17:22:47,210 INFO [RS:1;5c70517e8267:38423 {}] regionserver.HRegionServer(3073): reportForDuty to master=5c70517e8267,36895,1733592164285 with isa=5c70517e8267/172.17.0.2:38423, startcode=1733592165361 2024-12-07T17:22:47,211 INFO [RS:0;5c70517e8267:41313 {}] regionserver.HRegionServer(3073): reportForDuty to master=5c70517e8267,36895,1733592164285 with isa=5c70517e8267/172.17.0.2:41313, startcode=1733592165267 2024-12-07T17:22:47,211 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T17:22:47,211 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:22:47,212 INFO 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36895 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 5c70517e8267,36163,1733592165482 2024-12-07T17:22:47,215 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36895 {}] master.ServerManager(486): Registering regionserver=5c70517e8267,36163,1733592165482 2024-12-07T17:22:47,215 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T17:22:47,215 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T17:22:47,220 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T17:22:47,220 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:22:47,222 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T17:22:47,222 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T17:22:47,225 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T17:22:47,225 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:22:47,226 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T17:22:47,227 DEBUG [RS:2;5c70517e8267:36163 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:22:47,227 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36895 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 5c70517e8267,41313,1733592165267 2024-12-07T17:22:47,227 DEBUG [RS:2;5c70517e8267:36163 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:33765 2024-12-07T17:22:47,227 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36895 {}] master.ServerManager(486): Registering regionserver=5c70517e8267,41313,1733592165267 2024-12-07T17:22:47,227 DEBUG [RS:2;5c70517e8267:36163 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-07T17:22:47,228 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/meta/1588230740 2024-12-07T17:22:47,229 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/meta/1588230740 2024-12-07T17:22:47,233 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36895 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 5c70517e8267,38423,1733592165361 2024-12-07T17:22:47,233 DEBUG [RS:0;5c70517e8267:41313 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:22:47,233 DEBUG [RS:0;5c70517e8267:41313 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:33765 2024-12-07T17:22:47,233 DEBUG [RS:0;5c70517e8267:41313 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-07T17:22:47,233 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-07T17:22:47,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T17:22:47,233 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36895 {}] master.ServerManager(486): Registering regionserver=5c70517e8267,38423,1733592165361 2024-12-07T17:22:47,235 DEBUG [RS:2;5c70517e8267:36163 {}] zookeeper.ZKUtil(111): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5c70517e8267,36163,1733592165482 2024-12-07T17:22:47,235 WARN [RS:2;5c70517e8267:36163 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-07T17:22:47,235 INFO [RS:2;5c70517e8267:36163 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T17:22:47,235 DEBUG [RS:2;5c70517e8267:36163 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/WALs/5c70517e8267,36163,1733592165482 2024-12-07T17:22:47,237 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-07T17:22:47,241 DEBUG [RS:1;5c70517e8267:38423 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:22:47,241 DEBUG [RS:1;5c70517e8267:38423 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:33765 2024-12-07T17:22:47,241 DEBUG [RS:1;5c70517e8267:38423 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-07T17:22:47,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T17:22:47,245 DEBUG [RS:0;5c70517e8267:41313 {}] zookeeper.ZKUtil(111): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5c70517e8267,41313,1733592165267 2024-12-07T17:22:47,245 DEBUG [RS:1;5c70517e8267:38423 {}] zookeeper.ZKUtil(111): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5c70517e8267,38423,1733592165361 2024-12-07T17:22:47,245 WARN [RS:0;5c70517e8267:41313 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T17:22:47,245 WARN [RS:1;5c70517e8267:38423 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-07T17:22:47,245 INFO [RS:1;5c70517e8267:38423 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T17:22:47,245 INFO [RS:0;5c70517e8267:41313 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T17:22:47,245 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5c70517e8267,36163,1733592165482] 2024-12-07T17:22:47,245 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5c70517e8267,41313,1733592165267] 2024-12-07T17:22:47,245 DEBUG [RS:1;5c70517e8267:38423 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/WALs/5c70517e8267,38423,1733592165361 2024-12-07T17:22:47,245 DEBUG [RS:0;5c70517e8267:41313 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/WALs/5c70517e8267,41313,1733592165267 2024-12-07T17:22:47,247 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5c70517e8267,38423,1733592165361] 2024-12-07T17:22:47,249 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T17:22:47,251 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66587307, jitterRate=-0.007771804928779602}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T17:22:47,256 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-07T17:22:47,256 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-07T17:22:47,256 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-07T17:22:47,256 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-07T17:22:47,257 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T17:22:47,257 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T17:22:47,258 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-07T17:22:47,258 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-07T17:22:47,261 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-07T17:22:47,262 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-07T17:22:47,269 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-07T17:22:47,270 DEBUG [RS:1;5c70517e8267:38423 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-07T17:22:47,272 DEBUG 
[RS:0;5c70517e8267:41313 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-07T17:22:47,272 DEBUG [RS:2;5c70517e8267:36163 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-07T17:22:47,279 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T17:22:47,283 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-07T17:22:47,292 INFO [RS:1;5c70517e8267:38423 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T17:22:47,292 INFO [RS:2;5c70517e8267:36163 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T17:22:47,292 INFO [RS:0;5c70517e8267:41313 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T17:22:47,316 INFO [RS:0;5c70517e8267:41313 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T17:22:47,316 INFO [RS:2;5c70517e8267:36163 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T17:22:47,318 INFO [RS:1;5c70517e8267:38423 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T17:22:47,322 INFO [RS:2;5c70517e8267:36163 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T17:22:47,322 INFO [RS:0;5c70517e8267:41313 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T17:22:47,322 INFO [RS:1;5c70517e8267:38423 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T17:22:47,322 INFO [RS:2;5c70517e8267:36163 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T17:22:47,322 INFO [RS:0;5c70517e8267:41313 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T17:22:47,322 INFO [RS:1;5c70517e8267:38423 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-07T17:22:47,323 INFO [RS:2;5c70517e8267:36163 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-07T17:22:47,323 INFO [RS:0;5c70517e8267:41313 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-07T17:22:47,323 INFO [RS:1;5c70517e8267:38423 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-07T17:22:47,332 INFO [RS:0;5c70517e8267:41313 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T17:22:47,332 INFO [RS:2;5c70517e8267:36163 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T17:22:47,332 INFO [RS:1;5c70517e8267:38423 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T17:22:47,333 DEBUG [RS:0;5c70517e8267:41313 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5c70517e8267:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:22:47,333 DEBUG [RS:2;5c70517e8267:36163 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5c70517e8267:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:22:47,333 DEBUG [RS:1;5c70517e8267:38423 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5c70517e8267:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:22:47,333 DEBUG [RS:0;5c70517e8267:41313 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5c70517e8267:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:22:47,333 DEBUG [RS:2;5c70517e8267:36163 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5c70517e8267:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:22:47,333 DEBUG [RS:0;5c70517e8267:41313 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:22:47,333 DEBUG [RS:1;5c70517e8267:38423 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5c70517e8267:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:22:47,333 DEBUG [RS:2;5c70517e8267:36163 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:22:47,333 DEBUG [RS:0;5c70517e8267:41313 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5c70517e8267:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:22:47,333 DEBUG [RS:2;5c70517e8267:36163 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5c70517e8267:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:22:47,333 DEBUG [RS:1;5c70517e8267:38423 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:22:47,333 DEBUG [RS:0;5c70517e8267:41313 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5c70517e8267:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:22:47,333 DEBUG [RS:2;5c70517e8267:36163 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5c70517e8267:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:22:47,333 DEBUG 
[RS:1;5c70517e8267:38423 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5c70517e8267:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:22:47,333 DEBUG [RS:0;5c70517e8267:41313 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5c70517e8267:0, corePoolSize=2, maxPoolSize=2 2024-12-07T17:22:47,334 DEBUG [RS:2;5c70517e8267:36163 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5c70517e8267:0, corePoolSize=2, maxPoolSize=2 2024-12-07T17:22:47,334 DEBUG [RS:1;5c70517e8267:38423 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5c70517e8267:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:22:47,334 DEBUG [RS:0;5c70517e8267:41313 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5c70517e8267:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:22:47,334 DEBUG [RS:2;5c70517e8267:36163 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5c70517e8267:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:22:47,334 DEBUG [RS:2;5c70517e8267:36163 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5c70517e8267:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:22:47,334 DEBUG [RS:0;5c70517e8267:41313 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5c70517e8267:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:22:47,334 DEBUG [RS:1;5c70517e8267:38423 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5c70517e8267:0, corePoolSize=2, maxPoolSize=2 2024-12-07T17:22:47,334 DEBUG [RS:0;5c70517e8267:41313 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5c70517e8267:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:22:47,334 DEBUG [RS:2;5c70517e8267:36163 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5c70517e8267:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:22:47,334 DEBUG [RS:1;5c70517e8267:38423 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5c70517e8267:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:22:47,334 DEBUG [RS:0;5c70517e8267:41313 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5c70517e8267:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:22:47,334 DEBUG [RS:2;5c70517e8267:36163 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5c70517e8267:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:22:47,334 DEBUG [RS:1;5c70517e8267:38423 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5c70517e8267:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:22:47,334 DEBUG [RS:0;5c70517e8267:41313 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5c70517e8267:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:22:47,334 DEBUG [RS:2;5c70517e8267:36163 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5c70517e8267:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:22:47,334 DEBUG [RS:1;5c70517e8267:38423 {}] executor.ExecutorService(95): Starting executor service 
name=RS_REFRESH_PEER-regionserver/5c70517e8267:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:22:47,334 DEBUG [RS:2;5c70517e8267:36163 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0, corePoolSize=3, maxPoolSize=3 2024-12-07T17:22:47,335 DEBUG [RS:1;5c70517e8267:38423 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5c70517e8267:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:22:47,335 DEBUG [RS:2;5c70517e8267:36163 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5c70517e8267:0, corePoolSize=3, maxPoolSize=3 2024-12-07T17:22:47,335 DEBUG [RS:1;5c70517e8267:38423 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5c70517e8267:0, corePoolSize=1, maxPoolSize=1 2024-12-07T17:22:47,334 DEBUG [RS:0;5c70517e8267:41313 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0, corePoolSize=3, maxPoolSize=3 2024-12-07T17:22:47,335 DEBUG [RS:1;5c70517e8267:38423 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0, corePoolSize=3, maxPoolSize=3 2024-12-07T17:22:47,335 DEBUG [RS:0;5c70517e8267:41313 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5c70517e8267:0, corePoolSize=3, maxPoolSize=3 2024-12-07T17:22:47,335 DEBUG [RS:1;5c70517e8267:38423 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5c70517e8267:0, corePoolSize=3, maxPoolSize=3 2024-12-07T17:22:47,336 INFO [RS:2;5c70517e8267:36163 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T17:22:47,336 INFO [RS:2;5c70517e8267:36163 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T17:22:47,336 INFO [RS:2;5c70517e8267:36163 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T17:22:47,337 INFO [RS:2;5c70517e8267:36163 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T17:22:47,337 INFO [RS:2;5c70517e8267:36163 {}] hbase.ChoreService(168): Chore ScheduledChore name=5c70517e8267,36163,1733592165482-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T17:22:47,339 INFO [RS:1;5c70517e8267:38423 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T17:22:47,339 INFO [RS:1;5c70517e8267:38423 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T17:22:47,339 INFO [RS:0;5c70517e8267:41313 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T17:22:47,339 INFO [RS:1;5c70517e8267:38423 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T17:22:47,339 INFO [RS:0;5c70517e8267:41313 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 
2024-12-07T17:22:47,339 INFO [RS:1;5c70517e8267:38423 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T17:22:47,339 INFO [RS:0;5c70517e8267:41313 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T17:22:47,339 INFO [RS:1;5c70517e8267:38423 {}] hbase.ChoreService(168): Chore ScheduledChore name=5c70517e8267,38423,1733592165361-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T17:22:47,339 INFO [RS:0;5c70517e8267:41313 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T17:22:47,340 INFO [RS:0;5c70517e8267:41313 {}] hbase.ChoreService(168): Chore ScheduledChore name=5c70517e8267,41313,1733592165267-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T17:22:47,367 INFO [RS:1;5c70517e8267:38423 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T17:22:47,367 INFO [RS:2;5c70517e8267:36163 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T17:22:47,369 INFO [RS:0;5c70517e8267:41313 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T17:22:47,370 INFO [RS:0;5c70517e8267:41313 {}] hbase.ChoreService(168): Chore ScheduledChore name=5c70517e8267,41313,1733592165267-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T17:22:47,370 INFO [RS:2;5c70517e8267:36163 {}] hbase.ChoreService(168): Chore ScheduledChore name=5c70517e8267,36163,1733592165482-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T17:22:47,370 INFO [RS:1;5c70517e8267:38423 {}] hbase.ChoreService(168): Chore ScheduledChore name=5c70517e8267,38423,1733592165361-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 
2024-12-07T17:22:47,399 INFO [RS:0;5c70517e8267:41313 {}] regionserver.Replication(204): 5c70517e8267,41313,1733592165267 started 2024-12-07T17:22:47,399 INFO [RS:2;5c70517e8267:36163 {}] regionserver.Replication(204): 5c70517e8267,36163,1733592165482 started 2024-12-07T17:22:47,399 INFO [RS:0;5c70517e8267:41313 {}] regionserver.HRegionServer(1767): Serving as 5c70517e8267,41313,1733592165267, RpcServer on 5c70517e8267/172.17.0.2:41313, sessionid=0x1006d80dcf90001 2024-12-07T17:22:47,399 INFO [RS:2;5c70517e8267:36163 {}] regionserver.HRegionServer(1767): Serving as 5c70517e8267,36163,1733592165482, RpcServer on 5c70517e8267/172.17.0.2:36163, sessionid=0x1006d80dcf90003 2024-12-07T17:22:47,400 DEBUG [RS:0;5c70517e8267:41313 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T17:22:47,400 DEBUG [RS:2;5c70517e8267:36163 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T17:22:47,400 DEBUG [RS:0;5c70517e8267:41313 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5c70517e8267,41313,1733592165267 2024-12-07T17:22:47,400 DEBUG [RS:2;5c70517e8267:36163 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5c70517e8267,36163,1733592165482 2024-12-07T17:22:47,400 DEBUG [RS:0;5c70517e8267:41313 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5c70517e8267,41313,1733592165267' 2024-12-07T17:22:47,401 DEBUG [RS:0;5c70517e8267:41313 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T17:22:47,401 DEBUG [RS:2;5c70517e8267:36163 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5c70517e8267,36163,1733592165482' 2024-12-07T17:22:47,401 DEBUG [RS:2;5c70517e8267:36163 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T17:22:47,404 INFO [RS:1;5c70517e8267:38423 {}] regionserver.Replication(204): 5c70517e8267,38423,1733592165361 started 2024-12-07T17:22:47,404 INFO [RS:1;5c70517e8267:38423 {}] regionserver.HRegionServer(1767): Serving as 5c70517e8267,38423,1733592165361, RpcServer on 5c70517e8267/172.17.0.2:38423, sessionid=0x1006d80dcf90002 2024-12-07T17:22:47,404 DEBUG [RS:1;5c70517e8267:38423 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T17:22:47,404 DEBUG [RS:1;5c70517e8267:38423 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5c70517e8267,38423,1733592165361 2024-12-07T17:22:47,404 DEBUG [RS:1;5c70517e8267:38423 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5c70517e8267,38423,1733592165361' 2024-12-07T17:22:47,404 DEBUG [RS:1;5c70517e8267:38423 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T17:22:47,407 DEBUG [RS:0;5c70517e8267:41313 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T17:22:47,407 DEBUG [RS:2;5c70517e8267:36163 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T17:22:47,407 DEBUG [RS:1;5c70517e8267:38423 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T17:22:47,408 DEBUG 
[RS:0;5c70517e8267:41313 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T17:22:47,408 DEBUG [RS:2;5c70517e8267:36163 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T17:22:47,408 DEBUG [RS:0;5c70517e8267:41313 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T17:22:47,408 DEBUG [RS:2;5c70517e8267:36163 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T17:22:47,408 DEBUG [RS:1;5c70517e8267:38423 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T17:22:47,408 DEBUG [RS:2;5c70517e8267:36163 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5c70517e8267,36163,1733592165482 2024-12-07T17:22:47,409 DEBUG [RS:1;5c70517e8267:38423 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T17:22:47,408 DEBUG [RS:0;5c70517e8267:41313 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5c70517e8267,41313,1733592165267 2024-12-07T17:22:47,409 DEBUG [RS:2;5c70517e8267:36163 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5c70517e8267,36163,1733592165482' 2024-12-07T17:22:47,409 DEBUG [RS:1;5c70517e8267:38423 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5c70517e8267,38423,1733592165361 2024-12-07T17:22:47,409 DEBUG [RS:0;5c70517e8267:41313 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5c70517e8267,41313,1733592165267' 2024-12-07T17:22:47,409 DEBUG [RS:2;5c70517e8267:36163 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T17:22:47,409 DEBUG [RS:1;5c70517e8267:38423 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5c70517e8267,38423,1733592165361' 2024-12-07T17:22:47,409 DEBUG [RS:0;5c70517e8267:41313 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T17:22:47,409 DEBUG [RS:1;5c70517e8267:38423 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T17:22:47,409 DEBUG [RS:2;5c70517e8267:36163 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T17:22:47,412 DEBUG [RS:0;5c70517e8267:41313 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T17:22:47,412 DEBUG [RS:1;5c70517e8267:38423 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T17:22:47,413 DEBUG [RS:1;5c70517e8267:38423 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T17:22:47,413 DEBUG [RS:0;5c70517e8267:41313 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T17:22:47,413 INFO [RS:1;5c70517e8267:38423 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T17:22:47,413 INFO [RS:0;5c70517e8267:41313 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T17:22:47,413 INFO [RS:0;5c70517e8267:41313 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-07T17:22:47,413 INFO [RS:1;5c70517e8267:38423 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T17:22:47,415 DEBUG [RS:2;5c70517e8267:36163 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T17:22:47,416 INFO [RS:2;5c70517e8267:36163 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T17:22:47,416 INFO [RS:2;5c70517e8267:36163 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T17:22:47,434 WARN [5c70517e8267:36895 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 2024-12-07T17:22:47,521 INFO [RS:1;5c70517e8267:38423 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T17:22:47,521 INFO [RS:2;5c70517e8267:36163 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T17:22:47,521 INFO [RS:0;5c70517e8267:41313 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T17:22:47,525 INFO [RS:0;5c70517e8267:41313 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5c70517e8267%2C41313%2C1733592165267, suffix=, logDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/WALs/5c70517e8267,41313,1733592165267, archiveDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/oldWALs, maxLogs=32 2024-12-07T17:22:47,526 INFO [RS:2;5c70517e8267:36163 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5c70517e8267%2C36163%2C1733592165482, suffix=, logDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/WALs/5c70517e8267,36163,1733592165482, archiveDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/oldWALs, maxLogs=32 2024-12-07T17:22:47,526 INFO [RS:1;5c70517e8267:38423 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5c70517e8267%2C38423%2C1733592165361, suffix=, logDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/WALs/5c70517e8267,38423,1733592165361, archiveDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/oldWALs, maxLogs=32 2024-12-07T17:22:47,550 DEBUG [RS:1;5c70517e8267:38423 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/WALs/5c70517e8267,38423,1733592165361/5c70517e8267%2C38423%2C1733592165361.1733592167529, exclude list is [], retry=0 2024-12-07T17:22:47,550 DEBUG [RS:2;5c70517e8267:36163 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/WALs/5c70517e8267,36163,1733592165482/5c70517e8267%2C36163%2C1733592165482.1733592167531, exclude list is [], retry=0 2024-12-07T17:22:47,551 DEBUG [RS:0;5c70517e8267:41313 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/WALs/5c70517e8267,41313,1733592165267/5c70517e8267%2C41313%2C1733592165267.1733592167528, exclude list is [], retry=0 2024-12-07T17:22:47,556 WARN [IPC Server handler 2 on default port 33765 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to 
reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T17:22:47,556 WARN [IPC Server handler 3 on default port 33765 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T17:22:47,556 WARN [IPC Server handler 3 on default port 33765 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T17:22:47,556 WARN [IPC Server handler 3 on default port 33765 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T17:22:47,556 WARN [IPC Server handler 2 on default port 33765 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T17:22:47,557 WARN [IPC Server handler 2 on default port 33765 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T17:22:47,563 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45837,DS-958cba15-3134-4320-a3da-a10e5b18094b,DISK] 2024-12-07T17:22:47,563 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33165,DS-dcd6d0fb-0560-48ee-91f4-03ad18ceda65,DISK] 2024-12-07T17:22:47,564 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:33595,DS-4a91c7f3-67c1-4bee-8962-5132d6578b91,DISK] 2024-12-07T17:22:47,564 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33165,DS-dcd6d0fb-0560-48ee-91f4-03ad18ceda65,DISK] 2024-12-07T17:22:47,566 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33595,DS-4a91c7f3-67c1-4bee-8962-5132d6578b91,DISK] 2024-12-07T17:22:47,567 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33595,DS-4a91c7f3-67c1-4bee-8962-5132d6578b91,DISK] 2024-12-07T17:22:47,567 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33165,DS-dcd6d0fb-0560-48ee-91f4-03ad18ceda65,DISK] 2024-12-07T17:22:47,634 INFO [RS:1;5c70517e8267:38423 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/WALs/5c70517e8267,38423,1733592165361/5c70517e8267%2C38423%2C1733592165361.1733592167529 2024-12-07T17:22:47,634 INFO [RS:0;5c70517e8267:41313 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/WALs/5c70517e8267,41313,1733592165267/5c70517e8267%2C41313%2C1733592165267.1733592167528 2024-12-07T17:22:47,635 INFO [RS:2;5c70517e8267:36163 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/WALs/5c70517e8267,36163,1733592165482/5c70517e8267%2C36163%2C1733592165482.1733592167531 2024-12-07T17:22:47,636 DEBUG [RS:1;5c70517e8267:38423 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36071:36071),(127.0.0.1/127.0.0.1:43531:43531)] 2024-12-07T17:22:47,648 DEBUG [RS:0;5c70517e8267:41313 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36071:36071),(127.0.0.1/127.0.0.1:43531:43531)] 2024-12-07T17:22:47,652 DEBUG [RS:2;5c70517e8267:36163 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46823:46823),(127.0.0.1/127.0.0.1:43531:43531),(127.0.0.1/127.0.0.1:36071:36071)] 2024-12-07T17:22:47,686 DEBUG [5c70517e8267:36895 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-07T17:22:47,690 DEBUG [5c70517e8267:36895 {}] balancer.BalancerClusterState(202): Hosts are {5c70517e8267=0} racks are {/default-rack=0} 2024-12-07T17:22:47,698 DEBUG [5c70517e8267:36895 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-07T17:22:47,698 DEBUG [5c70517e8267:36895 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-07T17:22:47,698 DEBUG [5c70517e8267:36895 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-07T17:22:47,699 INFO [5c70517e8267:36895 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-07T17:22:47,699 INFO [5c70517e8267:36895 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-07T17:22:47,699 INFO [5c70517e8267:36895 {}] 
balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-07T17:22:47,699 DEBUG [5c70517e8267:36895 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T17:22:47,706 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5c70517e8267,38423,1733592165361 2024-12-07T17:22:47,713 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5c70517e8267,38423,1733592165361, state=OPENING 2024-12-07T17:22:47,719 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-07T17:22:47,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:22:47,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:22:47,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:22:47,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:22:47,722 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T17:22:47,722 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T17:22:47,723 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T17:22:47,723 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T17:22:47,726 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=5c70517e8267,38423,1733592165361}] 2024-12-07T17:22:47,908 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5c70517e8267,38423,1733592165361 2024-12-07T17:22:47,911 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T17:22:47,914 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36434, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T17:22:47,929 INFO [RS_OPEN_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-07T17:22:47,929 INFO [RS_OPEN_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T17:22:47,930 INFO [RS_OPEN_META-regionserver/5c70517e8267:0-0 
{event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-07T17:22:47,934 INFO [RS_OPEN_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5c70517e8267%2C38423%2C1733592165361.meta, suffix=.meta, logDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/WALs/5c70517e8267,38423,1733592165361, archiveDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/oldWALs, maxLogs=32 2024-12-07T17:22:47,959 DEBUG [RS_OPEN_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/WALs/5c70517e8267,38423,1733592165361/5c70517e8267%2C38423%2C1733592165361.meta.1733592167936.meta, exclude list is [], retry=0 2024-12-07T17:22:47,964 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45837,DS-958cba15-3134-4320-a3da-a10e5b18094b,DISK] 2024-12-07T17:22:47,964 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33165,DS-dcd6d0fb-0560-48ee-91f4-03ad18ceda65,DISK] 2024-12-07T17:22:47,965 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33595,DS-4a91c7f3-67c1-4bee-8962-5132d6578b91,DISK] 2024-12-07T17:22:47,976 INFO [RS_OPEN_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/WALs/5c70517e8267,38423,1733592165361/5c70517e8267%2C38423%2C1733592165361.meta.1733592167936.meta 2024-12-07T17:22:47,980 DEBUG [RS_OPEN_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46823:46823),(127.0.0.1/127.0.0.1:43531:43531),(127.0.0.1/127.0.0.1:36071:36071)] 2024-12-07T17:22:47,980 DEBUG [RS_OPEN_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-07T17:22:47,982 DEBUG [RS_OPEN_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-12-07T17:22:47,983 INFO [RS_OPEN_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
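[editor's note] The entries above show each region server instantiating an AsyncFSWALProvider and creating WALs with blocksize=256 MB and rollsize=128 MB. Below is a minimal sketch of how such values are typically supplied to a test configuration; the key names (hbase.wal.provider, hbase.regionserver.hlog.blocksize, hbase.regionserver.logroll.multiplier) are standard HBase settings but are an assumption here, since this log prints only the resolved values, not the properties that produced them.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
      public static Configuration walConfig() {
        Configuration conf = HBaseConfiguration.create();
        // Assumed keys; the log above reports the resulting values only.
        conf.set("hbase.wal.provider", "asyncfs");                      // AsyncFSWALProvider, as instantiated above
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);   // 256 MB * 0.5 = 128 MB roll size
        return conf;
      }
    }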
2024-12-07T17:22:47,983 DEBUG [RS_OPEN_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-07T17:22:47,985 DEBUG [RS_OPEN_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-07T17:22:47,987 INFO [RS_OPEN_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-07T17:22:47,998 DEBUG [RS_OPEN_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-07T17:22:47,998 DEBUG [RS_OPEN_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:22:47,999 DEBUG [RS_OPEN_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-07T17:22:47,999 DEBUG [RS_OPEN_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-07T17:22:48,004 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T17:22:48,006 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T17:22:48,006 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:22:48,007 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T17:22:48,007 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T17:22:48,009 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T17:22:48,009 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:22:48,010 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T17:22:48,010 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T17:22:48,012 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T17:22:48,012 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:22:48,013 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T17:22:48,014 DEBUG [RS_OPEN_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/meta/1588230740 2024-12-07T17:22:48,017 DEBUG [RS_OPEN_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/meta/1588230740 2024-12-07T17:22:48,020 DEBUG [RS_OPEN_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in 
table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-07T17:22:48,024 DEBUG [RS_OPEN_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-07T17:22:48,026 INFO [RS_OPEN_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72088475, jitterRate=0.07420198619365692}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T17:22:48,030 DEBUG [RS_OPEN_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-07T17:22:48,040 INFO [RS_OPEN_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733592167900 2024-12-07T17:22:48,054 DEBUG [RS_OPEN_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-07T17:22:48,054 INFO [RS_OPEN_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-07T17:22:48,056 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=5c70517e8267,38423,1733592165361 2024-12-07T17:22:48,058 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5c70517e8267,38423,1733592165361, state=OPEN 2024-12-07T17:22:48,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T17:22:48,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T17:22:48,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T17:22:48,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T17:22:48,060 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T17:22:48,060 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T17:22:48,061 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T17:22:48,061 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: 
CHANGED 2024-12-07T17:22:48,065 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-07T17:22:48,065 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=5c70517e8267,38423,1733592165361 in 335 msec 2024-12-07T17:22:48,071 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-07T17:22:48,071 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 798 msec 2024-12-07T17:22:48,077 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.1100 sec 2024-12-07T17:22:48,077 INFO [master/5c70517e8267:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733592168077, completionTime=-1 2024-12-07T17:22:48,077 INFO [master/5c70517e8267:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-07T17:22:48,078 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-07T17:22:48,119 DEBUG [hconnection-0xeb8403e-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:22:48,122 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36444, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:22:48,134 INFO [master/5c70517e8267:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=3 2024-12-07T17:22:48,135 INFO [master/5c70517e8267:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733592228135 2024-12-07T17:22:48,135 INFO [master/5c70517e8267:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733592288135 2024-12-07T17:22:48,135 INFO [master/5c70517e8267:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 57 msec 2024-12-07T17:22:48,159 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] balancer.RegionLocationFinder(172): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-07T17:22:48,167 INFO [master/5c70517e8267:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5c70517e8267,36895,1733592164285-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T17:22:48,167 INFO [master/5c70517e8267:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5c70517e8267,36895,1733592164285-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T17:22:48,167 INFO [master/5c70517e8267:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5c70517e8267,36895,1733592164285-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 
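[editor's note] The ZKWatcher events above fire on /hbase/meta-region-server once the master publishes the hbase:meta location. A rough sketch of probing that znode with a plain ZooKeeper client follows, assuming the quorum address printed in this log (127.0.0.1:58133); note the payload is a protobuf-framed record rather than plain text, so this only demonstrates that the node exists and carries data.

    import org.apache.zookeeper.ZooKeeper;

    public class MetaZnodeProbe {
      public static void main(String[] args) throws Exception {
        // Quorum and znode path taken from the log above; payload format is an assumption.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:58133", 90000, event -> { });
        byte[] data = zk.getData("/hbase/meta-region-server", false, null);
        System.out.println("meta-region-server znode holds " + data.length + " bytes");
        zk.close();
      }
    }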
2024-12-07T17:22:48,169 INFO [master/5c70517e8267:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5c70517e8267:36895, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T17:22:48,169 INFO [master/5c70517e8267:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-07T17:22:48,175 DEBUG [master/5c70517e8267:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-07T17:22:48,178 INFO [master/5c70517e8267:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 2024-12-07T17:22:48,179 INFO [master/5c70517e8267:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-07T17:22:48,186 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-07T17:22:48,189 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T17:22:48,190 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:22:48,192 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T17:22:48,199 WARN [IPC Server handler 3 on default port 33765 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T17:22:48,200 WARN [IPC Server handler 3 on default port 33765 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T17:22:48,200 WARN [IPC Server handler 3 on default port 33765 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T17:22:48,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741837_1013 
(size=358) 2024-12-07T17:22:48,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741837_1013 (size=358) 2024-12-07T17:22:48,209 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 6860b2d9133d857f1acf6c9fb2cd82f1, NAME => 'hbase:namespace,,1733592168179.6860b2d9133d857f1acf6c9fb2cd82f1.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:22:48,213 WARN [IPC Server handler 4 on default port 33765 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T17:22:48,213 WARN [IPC Server handler 4 on default port 33765 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T17:22:48,213 WARN [IPC Server handler 4 on default port 33765 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T17:22:48,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741838_1014 (size=42) 2024-12-07T17:22:48,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741838_1014 (size=42) 2024-12-07T17:22:48,224 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733592168179.6860b2d9133d857f1acf6c9fb2cd82f1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:22:48,225 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 6860b2d9133d857f1acf6c9fb2cd82f1, disabling compactions & flushes 2024-12-07T17:22:48,225 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733592168179.6860b2d9133d857f1acf6c9fb2cd82f1. 
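[editor's note] The RegionOpenAndInit entries above materialize hbase:namespace from the descriptor shown (one 'info' family with VERSIONS => '10', IN_MEMORY => 'true', BLOCKSIZE => 8192). A minimal client-side sketch of creating a table with an equivalent family through the standard Admin API; the table name and connection settings are illustrative, not taken from this test.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Mirrors the 'info' family attributes printed by the CreateTableProcedure above.
          TableDescriptorBuilder td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("namespace_like_demo"))   // illustrative table name
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(10)
                  .setInMemory(true)
                  .setBlocksize(8192)
                  .build());
          admin.createTable(td.build());
        }
      }
    }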
2024-12-07T17:22:48,225 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733592168179.6860b2d9133d857f1acf6c9fb2cd82f1. 2024-12-07T17:22:48,225 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733592168179.6860b2d9133d857f1acf6c9fb2cd82f1. after waiting 0 ms 2024-12-07T17:22:48,225 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733592168179.6860b2d9133d857f1acf6c9fb2cd82f1. 2024-12-07T17:22:48,225 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733592168179.6860b2d9133d857f1acf6c9fb2cd82f1. 2024-12-07T17:22:48,225 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 6860b2d9133d857f1acf6c9fb2cd82f1: 2024-12-07T17:22:48,227 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T17:22:48,234 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733592168179.6860b2d9133d857f1acf6c9fb2cd82f1.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733592168228"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733592168228"}]},"ts":"1733592168228"} 2024-12-07T17:22:48,260 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-07T17:22:48,262 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T17:22:48,265 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592168262"}]},"ts":"1733592168262"} 2024-12-07T17:22:48,270 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-07T17:22:48,274 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(202): Hosts are {5c70517e8267=0} racks are {/default-rack=0} 2024-12-07T17:22:48,276 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-07T17:22:48,276 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-07T17:22:48,276 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-07T17:22:48,276 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-07T17:22:48,276 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-07T17:22:48,277 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-07T17:22:48,277 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T17:22:48,279 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=6860b2d9133d857f1acf6c9fb2cd82f1, ASSIGN}] 2024-12-07T17:22:48,282 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=6860b2d9133d857f1acf6c9fb2cd82f1, ASSIGN 2024-12-07T17:22:48,284 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=6860b2d9133d857f1acf6c9fb2cd82f1, ASSIGN; state=OFFLINE, location=5c70517e8267,36163,1733592165482; forceNewPlan=false, retain=false 2024-12-07T17:22:48,435 INFO [5c70517e8267:36895 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-07T17:22:48,436 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=6860b2d9133d857f1acf6c9fb2cd82f1, regionState=OPENING, regionLocation=5c70517e8267,36163,1733592165482 2024-12-07T17:22:48,440 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 6860b2d9133d857f1acf6c9fb2cd82f1, server=5c70517e8267,36163,1733592165482}] 2024-12-07T17:22:48,595 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5c70517e8267,36163,1733592165482 2024-12-07T17:22:48,595 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T17:22:48,597 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45230, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T17:22:48,608 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733592168179.6860b2d9133d857f1acf6c9fb2cd82f1. 2024-12-07T17:22:48,609 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 6860b2d9133d857f1acf6c9fb2cd82f1, NAME => 'hbase:namespace,,1733592168179.6860b2d9133d857f1acf6c9fb2cd82f1.', STARTKEY => '', ENDKEY => ''} 2024-12-07T17:22:48,609 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:namespace,,1733592168179.6860b2d9133d857f1acf6c9fb2cd82f1. service=AccessControlService 2024-12-07T17:22:48,609 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
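[editor's note] The "updating hbase:meta row=..." entries above are RegionStateStore persisting each region's state and location into the catalog table. A hedged sketch of reading those columns back with a client scan; info:server and info:state are the usual catalog qualifiers and are assumed here, since this log shows the Put only in summarized JSON form.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaScanSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table meta = conn.getTable(TableName.META_TABLE_NAME);
             ResultScanner scanner = meta.getScanner(new Scan().addFamily(Bytes.toBytes("info")))) {
          for (Result r : scanner) {
            // Assumed qualifiers: info:server (host,port) and info:state (OPENING/OPEN/...).
            byte[] server = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("server"));
            byte[] state  = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("state"));
            System.out.println(Bytes.toString(r.getRow()) + " -> "
                + (server == null ? "?" : Bytes.toString(server)) + " / "
                + (state == null ? "?" : Bytes.toString(state)));
          }
        }
      }
    }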
2024-12-07T17:22:48,610 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 6860b2d9133d857f1acf6c9fb2cd82f1 2024-12-07T17:22:48,610 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733592168179.6860b2d9133d857f1acf6c9fb2cd82f1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:22:48,610 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 6860b2d9133d857f1acf6c9fb2cd82f1 2024-12-07T17:22:48,610 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 6860b2d9133d857f1acf6c9fb2cd82f1 2024-12-07T17:22:48,616 INFO [StoreOpener-6860b2d9133d857f1acf6c9fb2cd82f1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 6860b2d9133d857f1acf6c9fb2cd82f1 2024-12-07T17:22:48,619 INFO [StoreOpener-6860b2d9133d857f1acf6c9fb2cd82f1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6860b2d9133d857f1acf6c9fb2cd82f1 columnFamilyName info 2024-12-07T17:22:48,619 DEBUG [StoreOpener-6860b2d9133d857f1acf6c9fb2cd82f1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:22:48,620 INFO [StoreOpener-6860b2d9133d857f1acf6c9fb2cd82f1-1 {}] regionserver.HStore(327): Store=6860b2d9133d857f1acf6c9fb2cd82f1/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:22:48,624 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/namespace/6860b2d9133d857f1acf6c9fb2cd82f1 2024-12-07T17:22:48,625 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/namespace/6860b2d9133d857f1acf6c9fb2cd82f1 2024-12-07T17:22:48,630 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, 
pid=6}] regionserver.HRegion(1085): writing seq id for 6860b2d9133d857f1acf6c9fb2cd82f1 2024-12-07T17:22:48,636 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/namespace/6860b2d9133d857f1acf6c9fb2cd82f1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T17:22:48,638 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 6860b2d9133d857f1acf6c9fb2cd82f1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60342077, jitterRate=-0.1008329838514328}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T17:22:48,642 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 6860b2d9133d857f1acf6c9fb2cd82f1: 2024-12-07T17:22:48,645 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733592168179.6860b2d9133d857f1acf6c9fb2cd82f1., pid=6, masterSystemTime=1733592168594 2024-12-07T17:22:48,650 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733592168179.6860b2d9133d857f1acf6c9fb2cd82f1. 2024-12-07T17:22:48,650 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733592168179.6860b2d9133d857f1acf6c9fb2cd82f1. 
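[editor's note] The CompactionConfiguration lines above report the effective compaction tuning for each store (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, minCompactSize 128 MB). A small sketch of the configuration keys those values usually come from; the key names are standard HBase settings and are an assumption here, since the log prints only the resolved values.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static Configuration compactionConfig() {
        Configuration conf = HBaseConfiguration.create();
        // Assumed keys; values mirror what CompactionConfiguration(181) reports above.
        conf.setInt("hbase.hstore.compaction.min", 3);                 // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);          // ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);  // off-peak ratio
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
        return conf;
      }
    }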
2024-12-07T17:22:48,651 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=6860b2d9133d857f1acf6c9fb2cd82f1, regionState=OPEN, openSeqNum=2, regionLocation=5c70517e8267,36163,1733592165482 2024-12-07T17:22:48,665 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-07T17:22:48,665 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 6860b2d9133d857f1acf6c9fb2cd82f1, server=5c70517e8267,36163,1733592165482 in 219 msec 2024-12-07T17:22:48,671 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-07T17:22:48,671 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=6860b2d9133d857f1acf6c9fb2cd82f1, ASSIGN in 386 msec 2024-12-07T17:22:48,673 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T17:22:48,673 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592168673"}]},"ts":"1733592168673"} 2024-12-07T17:22:48,676 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-07T17:22:48,681 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T17:22:48,684 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 501 msec 2024-12-07T17:22:48,689 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-07T17:22:48,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-07T17:22:48,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:22:48,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:22:48,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:22:48,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:22:48,735 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] 
ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:22:48,737 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45236, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:22:48,830 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-07T17:22:48,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-07T17:22:48,866 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 51 msec 2024-12-07T17:22:48,890 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-07T17:22:48,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-07T17:22:48,923 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 30 msec 2024-12-07T17:22:48,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-07T17:22:48,943 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-07T17:22:48,943 INFO [master/5c70517e8267:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 3.346sec 2024-12-07T17:22:48,945 INFO [master/5c70517e8267:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-07T17:22:48,946 INFO [master/5c70517e8267:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-07T17:22:48,947 INFO [master/5c70517e8267:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-07T17:22:48,948 INFO [master/5c70517e8267:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-07T17:22:48,948 INFO [master/5c70517e8267:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-07T17:22:48,949 INFO [master/5c70517e8267:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5c70517e8267,36895,1733592164285-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
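[editor's note] The region servers earlier and the MasterQuotaManager here both report "Quota support disabled", so neither RPC throttling nor space quotas are active in this test run. A one-line sketch of turning the feature on; hbase.quota.enabled is the standard switch and is an assumption here, since the log does not name the property.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaConfigSketch {
      public static Configuration withQuotas() {
        Configuration conf = HBaseConfiguration.create();
        // Assumed key: enables the MasterQuotaManager, RegionServerRpcQuotaManager and
        // RegionServerSpaceQuotaManager components that this log shows as disabled.
        conf.setBoolean("hbase.quota.enabled", true);
        return conf;
      }
    }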
2024-12-07T17:22:48,949 INFO [master/5c70517e8267:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5c70517e8267,36895,1733592164285-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-07T17:22:48,975 INFO [master/5c70517e8267:0:becomeActiveMaster {}] master.HMaster$4(2389): Client=null/null create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-07T17:22:48,979 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:acl 2024-12-07T17:22:48,982 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T17:22:48,982 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:22:48,983 INFO [master/5c70517e8267:0:becomeActiveMaster {}] master.MasterRpcServices(713): Client=null/null procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 9 2024-12-07T17:22:48,988 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-07T17:22:48,989 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T17:22:49,025 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x112a7c4c to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5f8c89f7 2024-12-07T17:22:49,028 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. 
See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-07T17:22:49,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741839_1015 (size=349) 2024-12-07T17:22:49,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741839_1015 (size=349) 2024-12-07T17:22:49,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741839_1015 (size=349) 2024-12-07T17:22:49,080 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3bf35e71, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:22:49,085 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 27a2410ab2d9b432daa26ec0ea504b68, NAME => 'hbase:acl,,1733592168970.27a2410ab2d9b432daa26ec0ea504b68.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:22:49,086 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-07T17:22:49,086 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-07T17:22:49,090 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-07T17:22:49,103 DEBUG [hconnection-0x1df0d3b0-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:22:49,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741840_1016 (size=36) 2024-12-07T17:22:49,122 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36446, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:22:49,126 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=5c70517e8267,36895,1733592164285 2024-12-07T17:22:49,126 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2790): Starting mini mapreduce cluster... 
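[editor's note] HBaseTestingUtility(1199) reports the minicluster up with its active master, and HBaseTestingUtility(2790) then starts a mini MapReduce cluster on top of it. A condensed sketch of that test scaffolding follows, assuming the HBaseTestingUtility entry points referenced by those log lines; the three-region-server count matches the RS:0/RS:1/RS:2 threads seen throughout this log.

    import org.apache.hadoop.hbase.HBaseTestingUtility;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        util.startMiniCluster(3);            // 1 master + 3 region servers, as in this log
        util.startMiniMapReduceCluster();    // the step announced by HBaseTestingUtility(2790)
        try {
          // ... run MapReduce-over-HBase test code here ...
        } finally {
          util.shutdownMiniMapReduceCluster();
          util.shutdownMiniCluster();
        }
      }
    }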
2024-12-07T17:22:49,126 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/test.cache.data in system properties and HBase conf 2024-12-07T17:22:49,126 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop.tmp.dir in system properties and HBase conf 2024-12-07T17:22:49,126 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop.log.dir in system properties and HBase conf 2024-12-07T17:22:49,127 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-07T17:22:49,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741840_1016 (size=36) 2024-12-07T17:22:49,127 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-07T17:22:49,127 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-07T17:22:49,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741840_1016 (size=36) 2024-12-07T17:22:49,127 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-07T17:22:49,127 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-07T17:22:49,127 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-07T17:22:49,127 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T17:22:49,127 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-07T17:22:49,127 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-07T17:22:49,127 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T17:22:49,127 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T17:22:49,127 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-07T17:22:49,127 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/nfs.dump.dir in system properties and HBase conf 2024-12-07T17:22:49,127 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/java.io.tmpdir in system properties and HBase conf 2024-12-07T17:22:49,127 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T17:22:49,127 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-07T17:22:49,128 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-07T17:22:49,131 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:acl,,1733592168970.27a2410ab2d9b432daa26ec0ea504b68.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:22:49,131 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] 
regionserver.HRegion(1681): Closing 27a2410ab2d9b432daa26ec0ea504b68, disabling compactions & flushes 2024-12-07T17:22:49,131 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:acl,,1733592168970.27a2410ab2d9b432daa26ec0ea504b68. 2024-12-07T17:22:49,131 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:acl,,1733592168970.27a2410ab2d9b432daa26ec0ea504b68. 2024-12-07T17:22:49,131 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:acl,,1733592168970.27a2410ab2d9b432daa26ec0ea504b68. after waiting 0 ms 2024-12-07T17:22:49,131 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:acl,,1733592168970.27a2410ab2d9b432daa26ec0ea504b68. 2024-12-07T17:22:49,131 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1922): Closed hbase:acl,,1733592168970.27a2410ab2d9b432daa26ec0ea504b68. 2024-12-07T17:22:49,131 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1635): Region close journal for 27a2410ab2d9b432daa26ec0ea504b68: 2024-12-07T17:22:49,134 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T17:22:49,134 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:acl,,1733592168970.27a2410ab2d9b432daa26ec0ea504b68.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1733592169134"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733592169134"}]},"ts":"1733592169134"} 2024-12-07T17:22:49,138 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-12-07T17:22:49,141 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T17:22:49,141 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592169141"}]},"ts":"1733592169141"} 2024-12-07T17:22:49,145 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:acl, state=ENABLING in hbase:meta 2024-12-07T17:22:49,150 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(202): Hosts are {5c70517e8267=0} racks are {/default-rack=0} 2024-12-07T17:22:49,151 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-07T17:22:49,152 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-07T17:22:49,152 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-07T17:22:49,152 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-07T17:22:49,152 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-07T17:22:49,152 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-07T17:22:49,152 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T17:22:49,152 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:acl, region=27a2410ab2d9b432daa26ec0ea504b68, ASSIGN}] 2024-12-07T17:22:49,155 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:acl, region=27a2410ab2d9b432daa26ec0ea504b68, ASSIGN 2024-12-07T17:22:49,156 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:acl, region=27a2410ab2d9b432daa26ec0ea504b68, ASSIGN; state=OFFLINE, location=5c70517e8267,36163,1733592165482; forceNewPlan=false, retain=false 2024-12-07T17:22:49,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741841_1017 (size=592039) 2024-12-07T17:22:49,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741841_1017 (size=592039) 2024-12-07T17:22:49,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741841_1017 (size=592039) 2024-12-07T17:22:49,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741842_1018 (size=1663647) 2024-12-07T17:22:49,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741842_1018 (size=1663647) 2024-12-07T17:22:49,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741842_1018 (size=1663647) 2024-12-07T17:22:49,290 DEBUG [master/5c70517e8267:0:becomeActiveMaster 
{}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-07T17:22:49,307 INFO [5c70517e8267:36895 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-07T17:22:49,308 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=27a2410ab2d9b432daa26ec0ea504b68, regionState=OPENING, regionLocation=5c70517e8267,36163,1733592165482 2024-12-07T17:22:49,313 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 27a2410ab2d9b432daa26ec0ea504b68, server=5c70517e8267,36163,1733592165482}] 2024-12-07T17:22:49,501 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5c70517e8267,36163,1733592165482 2024-12-07T17:22:49,563 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] handler.AssignRegionHandler(135): Open hbase:acl,,1733592168970.27a2410ab2d9b432daa26ec0ea504b68. 2024-12-07T17:22:49,563 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 27a2410ab2d9b432daa26ec0ea504b68, NAME => 'hbase:acl,,1733592168970.27a2410ab2d9b432daa26ec0ea504b68.', STARTKEY => '', ENDKEY => ''} 2024-12-07T17:22:49,564 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:acl,,1733592168970.27a2410ab2d9b432daa26ec0ea504b68. service=AccessControlService 2024-12-07T17:22:49,564 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-07T17:22:49,565 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl 27a2410ab2d9b432daa26ec0ea504b68 2024-12-07T17:22:49,565 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(894): Instantiated hbase:acl,,1733592168970.27a2410ab2d9b432daa26ec0ea504b68.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:22:49,565 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 27a2410ab2d9b432daa26ec0ea504b68 2024-12-07T17:22:49,565 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 27a2410ab2d9b432daa26ec0ea504b68 2024-12-07T17:22:49,572 INFO [StoreOpener-27a2410ab2d9b432daa26ec0ea504b68-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region 27a2410ab2d9b432daa26ec0ea504b68 2024-12-07T17:22:49,580 INFO [StoreOpener-27a2410ab2d9b432daa26ec0ea504b68-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 27a2410ab2d9b432daa26ec0ea504b68 columnFamilyName l 2024-12-07T17:22:49,580 DEBUG [StoreOpener-27a2410ab2d9b432daa26ec0ea504b68-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:22:49,581 INFO [StoreOpener-27a2410ab2d9b432daa26ec0ea504b68-1 {}] regionserver.HStore(327): Store=27a2410ab2d9b432daa26ec0ea504b68/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:22:49,583 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/acl/27a2410ab2d9b432daa26ec0ea504b68 2024-12-07T17:22:49,584 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/acl/27a2410ab2d9b432daa26ec0ea504b68 2024-12-07T17:22:49,590 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] 
regionserver.HRegion(1085): writing seq id for 27a2410ab2d9b432daa26ec0ea504b68 2024-12-07T17:22:49,591 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-07T17:22:49,597 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/acl/27a2410ab2d9b432daa26ec0ea504b68/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T17:22:49,598 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(1102): Opened 27a2410ab2d9b432daa26ec0ea504b68; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74556476, jitterRate=0.11097806692123413}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T17:22:49,601 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 27a2410ab2d9b432daa26ec0ea504b68: 2024-12-07T17:22:49,604 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:acl,,1733592168970.27a2410ab2d9b432daa26ec0ea504b68., pid=11, masterSystemTime=1733592169501 2024-12-07T17:22:49,607 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:acl,,1733592168970.27a2410ab2d9b432daa26ec0ea504b68. 2024-12-07T17:22:49,609 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=27a2410ab2d9b432daa26ec0ea504b68, regionState=OPEN, openSeqNum=2, regionLocation=5c70517e8267,36163,1733592165482 2024-12-07T17:22:49,610 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] handler.AssignRegionHandler(164): Opened hbase:acl,,1733592168970.27a2410ab2d9b432daa26ec0ea504b68. 
2024-12-07T17:22:49,619 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-07T17:22:49,619 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 27a2410ab2d9b432daa26ec0ea504b68, server=5c70517e8267,36163,1733592165482 in 301 msec 2024-12-07T17:22:49,624 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-07T17:22:49,624 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=hbase:acl, region=27a2410ab2d9b432daa26ec0ea504b68, ASSIGN in 467 msec 2024-12-07T17:22:49,626 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T17:22:49,626 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592169626"}]},"ts":"1733592169626"} 2024-12-07T17:22:49,630 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-12-07T17:22:49,634 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T17:22:49,639 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=hbase:acl in 660 msec 2024-12-07T17:22:50,091 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-07T17:22:50,091 INFO [master/5c70517e8267:0:becomeActiveMaster {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: hbase:acl, procId: 9 completed 2024-12-07T17:22:50,113 DEBUG [master/5c70517e8267:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-07T17:22:50,114 INFO [master/5c70517e8267:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-07T17:22:50,115 INFO [master/5c70517e8267:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5c70517e8267,36895,1733592164285-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T17:22:51,256 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T17:22:51,429 WARN [Thread-383 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T17:22:51,694 INFO [Thread-383 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T17:22:51,695 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. 
If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-07T17:22:51,696 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T17:22:51,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741829_1005 (size=34) 2024-12-07T17:22:51,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741831_1007 (size=1039) 2024-12-07T17:22:51,725 INFO [Thread-383 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T17:22:51,725 INFO [Thread-383 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T17:22:51,725 INFO [Thread-383 {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T17:22:51,727 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T17:22:51,727 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T17:22:51,727 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T17:22:51,727 INFO [Thread-383 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@20d16c0e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop.log.dir/,AVAILABLE} 2024-12-07T17:22:51,727 INFO [Thread-383 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4e8b35f5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-07T17:22:51,732 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T17:22:51,741 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1fe36ded{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop.log.dir/,AVAILABLE} 2024-12-07T17:22:51,742 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ef1f8fe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-07T17:22:51,942 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver as a provider class 2024-12-07T17:22:51,942 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices as a root resource class 2024-12-07T17:22:51,942 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-07T17:22:51,967 INFO [Thread-383 {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-07T17:22:51,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741826_1002 (size=42) 2024-12-07T17:22:51,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741837_1013 (size=358) 2024-12-07T17:22:52,054 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-07T17:22:52,595 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-07T17:22:52,985 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-07T17:22:53,031 INFO [Thread-383 {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@a1c4462{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/java.io.tmpdir/jetty-localhost-42875-hadoop-yarn-common-3_4_1_jar-_-any-9044831282968454276/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-07T17:22:53,031 INFO [Time-limited test {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@16715c05{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/java.io.tmpdir/jetty-localhost-45273-hadoop-yarn-common-3_4_1_jar-_-any-7834035845927571214/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-07T17:22:53,032 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@785080a6{HTTP/1.1, (http/1.1)}{localhost:45273} 2024-12-07T17:22:53,032 INFO [Thread-383 {}] server.AbstractConnector(333): Started ServerConnector@ae48202{HTTP/1.1, (http/1.1)}{localhost:42875} 2024-12-07T17:22:53,032 INFO [Time-limited test {}] server.Server(415): Started @17109ms 2024-12-07T17:22:53,032 INFO [Thread-383 {}] server.Server(415): Started @17109ms 2024-12-07T17:22:53,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741843_1019 (size=5) 2024-12-07T17:22:53,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741843_1019 (size=5) 2024-12-07T17:22:53,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741843_1019 (size=5) 2024-12-07T17:22:53,517 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T17:22:53,630 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-07T17:22:53,633 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-12-07T17:22:53,635 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-07T17:22:54,085 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-07T17:22:54,092 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T17:22:54,140 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 
2024-12-07T17:22:54,141 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T17:22:54,150 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T17:22:54,151 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T17:22:54,151 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T17:22:54,152 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T17:22:54,153 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2a76a006{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop.log.dir/,AVAILABLE} 2024-12-07T17:22:54,154 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5423671f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-07T17:22:54,236 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-07T17:22:54,236 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-07T17:22:54,236 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-07T17:22:54,236 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-07T17:22:54,248 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-07T17:22:54,298 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-07T17:22:54,512 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-07T17:22:54,538 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@573bf81a{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/java.io.tmpdir/jetty-localhost-40137-hadoop-yarn-common-3_4_1_jar-_-any-6135329668287126051/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-07T17:22:54,539 INFO [Time-limited test {}] server.AbstractConnector(333): Started 
ServerConnector@5f654859{HTTP/1.1, (http/1.1)}{localhost:40137} 2024-12-07T17:22:54,539 INFO [Time-limited test {}] server.Server(415): Started @18616ms 2024-12-07T17:22:54,792 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-07T17:22:54,796 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T17:22:54,817 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-07T17:22:54,818 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T17:22:54,824 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T17:22:54,824 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T17:22:54,824 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T17:22:54,826 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T17:22:54,828 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@27b49aa2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop.log.dir/,AVAILABLE} 2024-12-07T17:22:54,828 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d7dfc02{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-07T17:22:54,861 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:22:54,861 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers 2024-12-07T17:22:54,862 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-07T17:22:54,862 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-07T17:22:54,865 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:22:54,865 INFO 
[HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-12-07T17:22:54,866 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-07T17:22:54,866 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-07T17:22:54,867 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-07T17:22:54,867 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-07T17:22:54,869 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-07T17:22:54,869 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 2024-12-07T17:22:54,871 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:22:54,871 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-12-07T17:22:54,872 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-07T17:22:54,872 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers 2024-12-07T17:22:54,872 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T17:22:54,872 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-07T17:22:54,891 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-07T17:22:54,892 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-07T17:22:54,892 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-07T17:22:54,892 INFO [Time-limited test {}] application.WebApplicationImpl(815): 
Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-07T17:22:54,902 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-07T17:22:54,907 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-07T17:22:54,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741838_1014 (size=42) 2024-12-07T17:22:55,070 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-07T17:22:55,077 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@e35c669{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/java.io.tmpdir/jetty-localhost-35779-hadoop-yarn-common-3_4_1_jar-_-any-10298438292653981021/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-07T17:22:55,078 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7cae447d{HTTP/1.1, (http/1.1)}{localhost:35779} 2024-12-07T17:22:55,078 INFO [Time-limited test {}] server.Server(415): Started @19155ms 2024-12-07T17:22:55,119 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2825): Mini mapreduce cluster started 2024-12-07T17:22:55,120 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:22:55,155 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=719, OpenFileDescriptor=763, MaxFileDescriptor=1048576, SystemLoadAverage=481, ProcessCount=11, AvailableMemoryMB=7323 2024-12-07T17:22:55,155 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=719 is superior to 500 2024-12-07T17:22:55,167 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-07T17:22:55,170 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58150, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-07T17:22:55,178 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T17:22:55,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportWithTargetName 2024-12-07T17:22:55,182 INFO [PEWorker-4 {}] 
procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T17:22:55,182 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithTargetName" procId is: 12 2024-12-07T17:22:55,183 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:22:55,184 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T17:22:55,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-07T17:22:55,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741844_1020 (size=406) 2024-12-07T17:22:55,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741844_1020 (size=406) 2024-12-07T17:22:55,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741844_1020 (size=406) 2024-12-07T17:22:55,221 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => fd33eca1e672f36235ceeba48f989a9d, NAME => 'testtb-testExportWithTargetName,1,1733592175177.fd33eca1e672f36235ceeba48f989a9d.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:22:55,221 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 533037dcce0b4066fcb97082c19841df, NAME => 'testtb-testExportWithTargetName,,1733592175177.533037dcce0b4066fcb97082c19841df.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:22:55,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741845_1021 (size=67) 2024-12-07T17:22:55,245 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741845_1021 (size=67) 2024-12-07T17:22:55,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741846_1022 (size=67) 2024-12-07T17:22:55,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741846_1022 (size=67) 2024-12-07T17:22:55,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741846_1022 (size=67) 2024-12-07T17:22:55,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741845_1021 (size=67) 2024-12-07T17:22:55,252 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,,1733592175177.533037dcce0b4066fcb97082c19841df.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:22:55,252 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1681): Closing 533037dcce0b4066fcb97082c19841df, disabling compactions & flushes 2024-12-07T17:22:55,252 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,,1733592175177.533037dcce0b4066fcb97082c19841df. 2024-12-07T17:22:55,252 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,,1733592175177.533037dcce0b4066fcb97082c19841df. 2024-12-07T17:22:55,252 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,,1733592175177.533037dcce0b4066fcb97082c19841df. after waiting 0 ms 2024-12-07T17:22:55,252 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,,1733592175177.533037dcce0b4066fcb97082c19841df. 2024-12-07T17:22:55,252 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,,1733592175177.533037dcce0b4066fcb97082c19841df. 
2024-12-07T17:22:55,253 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1635): Region close journal for 533037dcce0b4066fcb97082c19841df: 2024-12-07T17:22:55,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-07T17:22:55,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-07T17:22:55,651 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,1,1733592175177.fd33eca1e672f36235ceeba48f989a9d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:22:55,651 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1681): Closing fd33eca1e672f36235ceeba48f989a9d, disabling compactions & flushes 2024-12-07T17:22:55,651 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,1,1733592175177.fd33eca1e672f36235ceeba48f989a9d. 2024-12-07T17:22:55,651 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,1,1733592175177.fd33eca1e672f36235ceeba48f989a9d. 2024-12-07T17:22:55,652 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,1,1733592175177.fd33eca1e672f36235ceeba48f989a9d. after waiting 0 ms 2024-12-07T17:22:55,652 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,1,1733592175177.fd33eca1e672f36235ceeba48f989a9d. 2024-12-07T17:22:55,652 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,1,1733592175177.fd33eca1e672f36235ceeba48f989a9d. 2024-12-07T17:22:55,652 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1635): Region close journal for fd33eca1e672f36235ceeba48f989a9d: 2024-12-07T17:22:55,654 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T17:22:55,654 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1733592175177.533037dcce0b4066fcb97082c19841df.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1733592175654"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733592175654"}]},"ts":"1733592175654"} 2024-12-07T17:22:55,655 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1733592175177.fd33eca1e672f36235ceeba48f989a9d.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1733592175654"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733592175654"}]},"ts":"1733592175654"} 2024-12-07T17:22:55,697 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 
2024-12-07T17:22:55,699 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T17:22:55,699 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592175699"}]},"ts":"1733592175699"} 2024-12-07T17:22:55,702 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-12-07T17:22:55,707 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(202): Hosts are {5c70517e8267=0} racks are {/default-rack=0} 2024-12-07T17:22:55,710 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-07T17:22:55,711 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-07T17:22:55,711 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-07T17:22:55,711 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-07T17:22:55,711 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-07T17:22:55,711 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-07T17:22:55,711 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T17:22:55,711 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=533037dcce0b4066fcb97082c19841df, ASSIGN}, {pid=14, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=fd33eca1e672f36235ceeba48f989a9d, ASSIGN}] 2024-12-07T17:22:55,714 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=533037dcce0b4066fcb97082c19841df, ASSIGN 2024-12-07T17:22:55,716 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=533037dcce0b4066fcb97082c19841df, ASSIGN; state=OFFLINE, location=5c70517e8267,38423,1733592165361; forceNewPlan=false, retain=false 2024-12-07T17:22:55,718 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=14, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=fd33eca1e672f36235ceeba48f989a9d, ASSIGN 2024-12-07T17:22:55,719 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=14, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=fd33eca1e672f36235ceeba48f989a9d, ASSIGN; state=OFFLINE, location=5c70517e8267,41313,1733592165267; forceNewPlan=false, retain=false 2024-12-07T17:22:55,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-07T17:22:55,866 INFO [5c70517e8267:36895 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-07T17:22:55,867 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=14 updating hbase:meta row=fd33eca1e672f36235ceeba48f989a9d, regionState=OPENING, regionLocation=5c70517e8267,41313,1733592165267 2024-12-07T17:22:55,867 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=533037dcce0b4066fcb97082c19841df, regionState=OPENING, regionLocation=5c70517e8267,38423,1733592165361 2024-12-07T17:22:55,871 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; OpenRegionProcedure fd33eca1e672f36235ceeba48f989a9d, server=5c70517e8267,41313,1733592165267}] 2024-12-07T17:22:55,880 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=16, ppid=13, state=RUNNABLE; OpenRegionProcedure 533037dcce0b4066fcb97082c19841df, server=5c70517e8267,38423,1733592165361}] 2024-12-07T17:22:56,025 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5c70517e8267,41313,1733592165267 2024-12-07T17:22:56,026 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T17:22:56,034 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5c70517e8267,38423,1733592165361 2024-12-07T17:22:56,044 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58866, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T17:22:56,051 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] handler.AssignRegionHandler(135): Open testtb-testExportWithTargetName,1,1733592175177.fd33eca1e672f36235ceeba48f989a9d. 2024-12-07T17:22:56,051 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7285): Opening region: {ENCODED => fd33eca1e672f36235ceeba48f989a9d, NAME => 'testtb-testExportWithTargetName,1,1733592175177.fd33eca1e672f36235ceeba48f989a9d.', STARTKEY => '1', ENDKEY => ''} 2024-12-07T17:22:56,052 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1733592175177.fd33eca1e672f36235ceeba48f989a9d. service=AccessControlService 2024-12-07T17:22:56,052 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
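The balancer output and the two OpenRegionProcedure children above (pid=15 dispatched to 5c70517e8267,41313 and pid=16 to 5c70517e8267,38423) establish where each region will live. Once they complete, the placement can be read back from any client; a hedged sketch, assuming an already open Connection named conn:

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ShowAssignments {
  // Prints encoded region name -> hosting region server, i.e. the assignments
  // the TransitRegionStateProcedures above are in the middle of establishing.
  static void show(Connection conn) throws Exception {
    TableName name = TableName.valueOf("testtb-testExportWithTargetName");
    try (RegionLocator locator = conn.getRegionLocator(name)) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}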
2024-12-07T17:22:56,052 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName fd33eca1e672f36235ceeba48f989a9d 2024-12-07T17:22:56,053 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,1,1733592175177.fd33eca1e672f36235ceeba48f989a9d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:22:56,053 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7327): checking encryption for fd33eca1e672f36235ceeba48f989a9d 2024-12-07T17:22:56,053 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7330): checking classloading for fd33eca1e672f36235ceeba48f989a9d 2024-12-07T17:22:56,055 INFO [StoreOpener-fd33eca1e672f36235ceeba48f989a9d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region fd33eca1e672f36235ceeba48f989a9d 2024-12-07T17:22:56,057 INFO [StoreOpener-fd33eca1e672f36235ceeba48f989a9d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fd33eca1e672f36235ceeba48f989a9d columnFamilyName cf 2024-12-07T17:22:56,058 DEBUG [StoreOpener-fd33eca1e672f36235ceeba48f989a9d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:22:56,058 INFO [StoreOpener-fd33eca1e672f36235ceeba48f989a9d-1 {}] regionserver.HStore(327): Store=fd33eca1e672f36235ceeba48f989a9d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:22:56,060 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithTargetName/fd33eca1e672f36235ceeba48f989a9d 2024-12-07T17:22:56,061 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithTargetName/fd33eca1e672f36235ceeba48f989a9d 2024-12-07T17:22:56,062 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] 
handler.AssignRegionHandler(135): Open testtb-testExportWithTargetName,,1733592175177.533037dcce0b4066fcb97082c19841df. 2024-12-07T17:22:56,062 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7285): Opening region: {ENCODED => 533037dcce0b4066fcb97082c19841df, NAME => 'testtb-testExportWithTargetName,,1733592175177.533037dcce0b4066fcb97082c19841df.', STARTKEY => '', ENDKEY => '1'} 2024-12-07T17:22:56,062 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithTargetName,,1733592175177.533037dcce0b4066fcb97082c19841df. service=AccessControlService 2024-12-07T17:22:56,063 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T17:22:56,063 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 533037dcce0b4066fcb97082c19841df 2024-12-07T17:22:56,063 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,,1733592175177.533037dcce0b4066fcb97082c19841df.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:22:56,063 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7327): checking encryption for 533037dcce0b4066fcb97082c19841df 2024-12-07T17:22:56,063 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7330): checking classloading for 533037dcce0b4066fcb97082c19841df 2024-12-07T17:22:56,065 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1085): writing seq id for fd33eca1e672f36235ceeba48f989a9d 2024-12-07T17:22:56,068 INFO [StoreOpener-533037dcce0b4066fcb97082c19841df-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 533037dcce0b4066fcb97082c19841df 2024-12-07T17:22:56,070 INFO [StoreOpener-533037dcce0b4066fcb97082c19841df-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 533037dcce0b4066fcb97082c19841df columnFamilyName cf 2024-12-07T17:22:56,070 DEBUG [StoreOpener-533037dcce0b4066fcb97082c19841df-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:22:56,071 INFO [StoreOpener-533037dcce0b4066fcb97082c19841df-1 {}] regionserver.HStore(327): Store=533037dcce0b4066fcb97082c19841df/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:22:56,073 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithTargetName/533037dcce0b4066fcb97082c19841df 2024-12-07T17:22:56,074 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithTargetName/533037dcce0b4066fcb97082c19841df 2024-12-07T17:22:56,078 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(1085): writing seq id for 533037dcce0b4066fcb97082c19841df 2024-12-07T17:22:56,081 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithTargetName/fd33eca1e672f36235ceeba48f989a9d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T17:22:56,081 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithTargetName/533037dcce0b4066fcb97082c19841df/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T17:22:56,083 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(1102): Opened 533037dcce0b4066fcb97082c19841df; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60141788, jitterRate=-0.10381752252578735}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T17:22:56,088 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1102): Opened fd33eca1e672f36235ceeba48f989a9d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66261616, jitterRate=-0.012624979019165039}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T17:22:56,088 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(1001): Region open journal for 533037dcce0b4066fcb97082c19841df: 2024-12-07T17:22:56,088 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1001): Region open journal for fd33eca1e672f36235ceeba48f989a9d: 2024-12-07T17:22:56,090 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithTargetName,,1733592175177.533037dcce0b4066fcb97082c19841df., pid=16, masterSystemTime=1733592176033 
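Both regions above report "Opened ...; next sequenceid=2" after a fresh 1.seqid file is written; the OPEN updates to hbase:meta just below are what finally make the table usable. The readiness check a client (or the test harness, with its 60000 ms bound) performs is roughly the following sketch; the polling loop is an illustrative assumption rather than the test's actual code:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class WaitForTable {
  // Polls the master until every region of the table is assigned and open.
  static void waitUntilAvailable(Connection conn) throws Exception {
    TableName name = TableName.valueOf("testtb-testExportWithTargetName");
    try (Admin admin = conn.getAdmin()) {
      while (!admin.isTableAvailable(name)) {
        Thread.sleep(100); // the test utility uses a similar wait with a 60000 ms timeout
      }
    }
  }
}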
2024-12-07T17:22:56,093 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithTargetName,1,1733592175177.fd33eca1e672f36235ceeba48f989a9d., pid=15, masterSystemTime=1733592176025 2024-12-07T17:22:56,095 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=533037dcce0b4066fcb97082c19841df, regionState=OPEN, openSeqNum=2, regionLocation=5c70517e8267,38423,1733592165361 2024-12-07T17:22:56,098 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithTargetName,,1733592175177.533037dcce0b4066fcb97082c19841df. 2024-12-07T17:22:56,098 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] handler.AssignRegionHandler(164): Opened testtb-testExportWithTargetName,,1733592175177.533037dcce0b4066fcb97082c19841df. 2024-12-07T17:22:56,098 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithTargetName,1,1733592175177.fd33eca1e672f36235ceeba48f989a9d. 2024-12-07T17:22:56,098 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] handler.AssignRegionHandler(164): Opened testtb-testExportWithTargetName,1,1733592175177.fd33eca1e672f36235ceeba48f989a9d. 2024-12-07T17:22:56,098 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=14 updating hbase:meta row=fd33eca1e672f36235ceeba48f989a9d, regionState=OPEN, openSeqNum=2, regionLocation=5c70517e8267,41313,1733592165267 2024-12-07T17:22:56,104 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=16, resume processing ppid=13 2024-12-07T17:22:56,104 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, ppid=13, state=SUCCESS; OpenRegionProcedure 533037dcce0b4066fcb97082c19841df, server=5c70517e8267,38423,1733592165361 in 219 msec 2024-12-07T17:22:56,106 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-12-07T17:22:56,106 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=533037dcce0b4066fcb97082c19841df, ASSIGN in 393 msec 2024-12-07T17:22:56,106 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; OpenRegionProcedure fd33eca1e672f36235ceeba48f989a9d, server=5c70517e8267,41313,1733592165267 in 231 msec 2024-12-07T17:22:56,111 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=14, resume processing ppid=12 2024-12-07T17:22:56,111 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=fd33eca1e672f36235ceeba48f989a9d, ASSIGN in 396 msec 2024-12-07T17:22:56,112 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T17:22:56,112 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put 
{"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592176112"}]},"ts":"1733592176112"} 2024-12-07T17:22:56,114 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta 2024-12-07T17:22:56,120 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T17:22:56,125 DEBUG [PEWorker-1 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA 2024-12-07T17:22:56,133 DEBUG [hconnection-0x7d60148-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:22:56,134 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60586, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=ClientService 2024-12-07T17:22:56,142 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36163 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-07T17:22:56,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-07T17:22:56,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-07T17:22:56,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-07T17:22:56,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-07T17:22:56,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:22:56,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:22:56,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:22:56,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:22:56,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, 
state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-07T17:22:56,181 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-07T17:22:56,181 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-07T17:22:56,182 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-07T17:22:56,182 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-07T17:22:56,185 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; CreateTableProcedure table=testtb-testExportWithTargetName in 1.0030 sec 2024-12-07T17:22:56,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-07T17:22:56,299 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName, procId: 12 completed 2024-12-07T17:22:56,299 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportWithTargetName get assigned. Timeout = 60000ms 2024-12-07T17:22:56,300 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:22:56,307 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportWithTargetName assigned to meta. Checking AM states. 2024-12-07T17:22:56,308 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:22:56,308 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportWithTargetName assigned. 2024-12-07T17:22:56,319 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-07T17:22:56,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733592176319 (current time:1733592176319). 
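The snapshot RPC at the end of the block above registers a request for emptySnaptb0-testExportWithTargetName with type=FLUSH against the freshly created, still empty table; the master fills in the creation time here and the TTL, version and owner defaults just below. The client side of such a request is sketched next, and the blocking call is what produces the recurring "Checking to see if procedure is done" polling in these logs (connection handling is again a placeholder):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeEmptySnapshot {
  // Requests a flush-type snapshot of the table; the call blocks until the
  // master-side SnapshotProcedure seen further below reports completion.
  static void snapshot(Connection conn) throws Exception {
    TableName name = TableName.valueOf("testtb-testExportWithTargetName");
    try (Admin admin = conn.getAdmin()) {
      admin.snapshot("emptySnaptb0-testExportWithTargetName", name, SnapshotType.FLUSH);
    }
  }
}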
2024-12-07T17:22:56,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T17:22:56,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-07T17:22:56,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T17:22:56,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5f8d8339 to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@705d1eff 2024-12-07T17:22:56,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37998832, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:22:56,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:22:56,329 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60594, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:22:56,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5f8d8339 to 127.0.0.1:58133 2024-12-07T17:22:56,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:22:56,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0f9de74a to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1c7b7ad6 2024-12-07T17:22:56,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6aa3098c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:22:56,347 DEBUG [hconnection-0x6eb3f27f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:22:56,349 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60606, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:22:56,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:22:56,353 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45990, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:22:56,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x0f9de74a to 127.0.0.1:58133 2024-12-07T17:22:56,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:22:56,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-07T17:22:56,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-07T17:22:56,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=17, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-07T17:22:56,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 17 2024-12-07T17:22:56,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17 2024-12-07T17:22:56,391 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T17:22:56,398 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T17:22:56,415 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T17:22:56,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741847_1023 (size=167) 2024-12-07T17:22:56,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741847_1023 (size=167) 2024-12-07T17:22:56,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741847_1023 (size=167) 2024-12-07T17:22:56,448 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T17:22:56,451 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=18, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 533037dcce0b4066fcb97082c19841df}, {pid=19, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 
fd33eca1e672f36235ceeba48f989a9d}] 2024-12-07T17:22:56,457 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=18, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 533037dcce0b4066fcb97082c19841df 2024-12-07T17:22:56,459 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=19, ppid=17, state=RUNNABLE; SnapshotRegionProcedure fd33eca1e672f36235ceeba48f989a9d 2024-12-07T17:22:56,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17 2024-12-07T17:22:56,614 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5c70517e8267,41313,1733592165267 2024-12-07T17:22:56,614 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5c70517e8267,38423,1733592165361 2024-12-07T17:22:56,616 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38423 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=18 2024-12-07T17:22:56,616 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41313 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=19 2024-12-07T17:22:56,617 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1733592175177.533037dcce0b4066fcb97082c19841df. 2024-12-07T17:22:56,617 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1733592175177.fd33eca1e672f36235ceeba48f989a9d. 2024-12-07T17:22:56,619 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.HRegion(2538): Flush status journal for 533037dcce0b4066fcb97082c19841df: 2024-12-07T17:22:56,619 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for fd33eca1e672f36235ceeba48f989a9d: 2024-12-07T17:22:56,619 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1733592175177.fd33eca1e672f36235ceeba48f989a9d. for emptySnaptb0-testExportWithTargetName completed. 2024-12-07T17:22:56,619 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1733592175177.533037dcce0b4066fcb97082c19841df. for emptySnaptb0-testExportWithTargetName completed. 2024-12-07T17:22:56,620 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1733592175177.fd33eca1e672f36235ceeba48f989a9d.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-07T17:22:56,620 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1733592175177.533037dcce0b4066fcb97082c19841df.' 
region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-07T17:22:56,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:22:56,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:22:56,626 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T17:22:56,626 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T17:22:56,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741848_1024 (size=70) 2024-12-07T17:22:56,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741848_1024 (size=70) 2024-12-07T17:22:56,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741848_1024 (size=70) 2024-12-07T17:22:56,649 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1733592175177.fd33eca1e672f36235ceeba48f989a9d. 2024-12-07T17:22:56,651 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-07T17:22:56,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-12-07T17:22:56,654 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region fd33eca1e672f36235ceeba48f989a9d 2024-12-07T17:22:56,655 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=19, ppid=17, state=RUNNABLE; SnapshotRegionProcedure fd33eca1e672f36235ceeba48f989a9d 2024-12-07T17:22:56,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741849_1025 (size=70) 2024-12-07T17:22:56,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741849_1025 (size=70) 2024-12-07T17:22:56,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741849_1025 (size=70) 2024-12-07T17:22:56,658 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1733592175177.533037dcce0b4066fcb97082c19841df. 
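Because nothing has been flushed yet, the per-region sub-procedures above store region-info but add references for an empty list of hfiles, and while they run the client's synchronous snapshot call keeps asking the master whether pid=17 is done. An asynchronous variant of the same request would look roughly like this; the SnapshotDescription constructor and the snapshotAsync/isSnapshotFinished pairing are assumptions about the 2.x client API, not code from this test:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class AsyncSnapshot {
  // Fires the snapshot without blocking, then polls for completion explicitly
  // instead of letting Admin.snapshot() poll internally.
  static void snapshotAsync(Connection conn) throws Exception {
    TableName name = TableName.valueOf("testtb-testExportWithTargetName");
    SnapshotDescription desc =
        new SnapshotDescription("emptySnaptb0-testExportWithTargetName", name);
    try (Admin admin = conn.getAdmin()) {
      admin.snapshotAsync(desc);
      while (!admin.isSnapshotFinished(desc)) {
        Thread.sleep(200);
      }
    }
  }
}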
2024-12-07T17:22:56,658 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=18 2024-12-07T17:22:56,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=18 2024-12-07T17:22:56,659 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 533037dcce0b4066fcb97082c19841df 2024-12-07T17:22:56,659 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=18, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 533037dcce0b4066fcb97082c19841df 2024-12-07T17:22:56,662 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=17, state=SUCCESS; SnapshotRegionProcedure fd33eca1e672f36235ceeba48f989a9d in 207 msec 2024-12-07T17:22:56,664 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=18, resume processing ppid=17 2024-12-07T17:22:56,664 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, ppid=17, state=SUCCESS; SnapshotRegionProcedure 533037dcce0b4066fcb97082c19841df in 210 msec 2024-12-07T17:22:56,664 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T17:22:56,667 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T17:22:56,670 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T17:22:56,670 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-12-07T17:22:56,674 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-12-07T17:22:56,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17 2024-12-07T17:22:56,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741850_1026 (size=549) 2024-12-07T17:22:56,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741850_1026 (size=549) 2024-12-07T17:22:56,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741850_1026 (size=549) 2024-12-07T17:22:56,744 INFO [PEWorker-5 {}] 
procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T17:22:56,775 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T17:22:56,776 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-12-07T17:22:56,780 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T17:22:56,781 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 17 2024-12-07T17:22:56,785 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 401 msec 2024-12-07T17:22:56,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17 2024-12-07T17:22:56,998 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName, procId: 17 completed 2024-12-07T17:22:57,024 DEBUG [htable-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:22:57,025 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38423 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithTargetName,,1733592175177.533037dcce0b4066fcb97082c19841df. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T17:22:57,031 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58876, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:22:57,035 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41313 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithTargetName,1,1733592175177.fd33eca1e672f36235ceeba48f989a9d. with WAL disabled. Data may be lost in the event of a crash. 
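The two "writing data to region ... with WAL disabled" warnings above come from the test loading rows into both regions with durability switched off, which the region servers flag as unsafe in the event of a crash. A minimal sketch of a put issued that way (row and value bytes are placeholders; the cf:q column matches the cells that show up in the flush output further below):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class LoadWithoutWal {
  // Writes a single cf:q cell while skipping the WAL, which is what triggers
  // the "Data may be lost in the event of a crash" warning above.
  static void putSkipWal(Connection conn, byte[] row, byte[] value) throws Exception {
    TableName name = TableName.valueOf("testtb-testExportWithTargetName");
    try (Table table = conn.getTable(name)) {
      Put put = new Put(row);
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), value);
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}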
2024-12-07T17:22:57,045 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportWithTargetName 2024-12-07T17:22:57,046 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportWithTargetName,,1733592175177.533037dcce0b4066fcb97082c19841df. 2024-12-07T17:22:57,047 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:22:57,078 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-07T17:22:57,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733592177078 (current time:1733592177078). 2024-12-07T17:22:57,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T17:22:57,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-07T17:22:57,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T17:22:57,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x61446c9f to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@57827d02 2024-12-07T17:22:57,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11ca7f4c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:22:57,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:22:57,087 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60612, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:22:57,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x61446c9f to 127.0.0.1:58133 2024-12-07T17:22:57,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:22:57,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x014f578a to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@456140cc 2024-12-07T17:22:57,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7937733, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-12-07T17:22:57,098 DEBUG [hconnection-0x2bcd5efe-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:22:57,100 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60616, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:22:57,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:22:57,108 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45994, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:22:57,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x014f578a to 127.0.0.1:58133 2024-12-07T17:22:57,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:22:57,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-07T17:22:57,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-07T17:22:57,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-07T17:22:57,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 20 2024-12-07T17:22:57,123 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T17:22:57,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-07T17:22:57,124 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T17:22:57,131 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T17:22:57,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741851_1027 (size=162) 2024-12-07T17:22:57,162 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741851_1027 (size=162) 2024-12-07T17:22:57,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741851_1027 (size=162) 2024-12-07T17:22:57,165 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T17:22:57,166 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 533037dcce0b4066fcb97082c19841df}, {pid=22, ppid=20, state=RUNNABLE; SnapshotRegionProcedure fd33eca1e672f36235ceeba48f989a9d}] 2024-12-07T17:22:57,167 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=21, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 533037dcce0b4066fcb97082c19841df 2024-12-07T17:22:57,168 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=22, ppid=20, state=RUNNABLE; SnapshotRegionProcedure fd33eca1e672f36235ceeba48f989a9d 2024-12-07T17:22:57,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-07T17:22:57,320 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5c70517e8267,38423,1733592165361 2024-12-07T17:22:57,321 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38423 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=21 2024-12-07T17:22:57,322 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1733592175177.533037dcce0b4066fcb97082c19841df. 2024-12-07T17:22:57,322 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 533037dcce0b4066fcb97082c19841df 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-07T17:22:57,326 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5c70517e8267,41313,1733592165267 2024-12-07T17:22:57,327 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41313 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=22 2024-12-07T17:22:57,327 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1733592175177.fd33eca1e672f36235ceeba48f989a9d. 
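Unlike the empty snapshot, the snaptb0-testExportWithTargetName sub-procedures above have real data to capture, so each SnapshotRegionCallable first flushes its region's memstore before adding references to the resulting hfiles (the flushes and hfile commits follow below). Once the whole procedure reports SUCCESS, both snapshots should be visible to any client; a short verification sketch, again with a placeholder connection:

import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshots {
  // Lists completed snapshots; emptySnaptb0-testExportWithTargetName and
  // snaptb0-testExportWithTargetName should both appear once their
  // procedures finish.
  static void list(Connection conn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      for (SnapshotDescription snap : admin.listSnapshots()) {
        System.out.println(snap.getName());
      }
    }
  }
}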
2024-12-07T17:22:57,328 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegion(2837): Flushing fd33eca1e672f36235ceeba48f989a9d 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-07T17:22:57,422 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithTargetName/533037dcce0b4066fcb97082c19841df/.tmp/cf/5b7d9615c6764dad9de4c9911acda09b is 71, key is 012818904652780656b6f07aa49883da/cf:q/1733592177024/Put/seqid=0 2024-12-07T17:22:57,423 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithTargetName/fd33eca1e672f36235ceeba48f989a9d/.tmp/cf/cf2c2dbd2540467783ab7720f3b1e448 is 71, key is 1626d464ca3900e9203ede2f1e72f425/cf:q/1733592177035/Put/seqid=0 2024-12-07T17:22:57,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-07T17:22:57,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741852_1028 (size=8188) 2024-12-07T17:22:57,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741852_1028 (size=8188) 2024-12-07T17:22:57,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741852_1028 (size=8188) 2024-12-07T17:22:57,462 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithTargetName/fd33eca1e672f36235ceeba48f989a9d/.tmp/cf/cf2c2dbd2540467783ab7720f3b1e448 2024-12-07T17:22:57,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741853_1029 (size=5422) 2024-12-07T17:22:57,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741853_1029 (size=5422) 2024-12-07T17:22:57,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741853_1029 (size=5422) 2024-12-07T17:22:57,491 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithTargetName/533037dcce0b4066fcb97082c19841df/.tmp/cf/5b7d9615c6764dad9de4c9911acda09b 2024-12-07T17:22:57,608 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithTargetName/533037dcce0b4066fcb97082c19841df/.tmp/cf/5b7d9615c6764dad9de4c9911acda09b as hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithTargetName/533037dcce0b4066fcb97082c19841df/cf/5b7d9615c6764dad9de4c9911acda09b 2024-12-07T17:22:57,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithTargetName/fd33eca1e672f36235ceeba48f989a9d/.tmp/cf/cf2c2dbd2540467783ab7720f3b1e448 as hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithTargetName/fd33eca1e672f36235ceeba48f989a9d/cf/cf2c2dbd2540467783ab7720f3b1e448 2024-12-07T17:22:57,623 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithTargetName/533037dcce0b4066fcb97082c19841df/cf/5b7d9615c6764dad9de4c9911acda09b, entries=5, sequenceid=6, filesize=5.3 K 2024-12-07T17:22:57,629 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for 533037dcce0b4066fcb97082c19841df in 307ms, sequenceid=6, compaction requested=false 2024-12-07T17:22:57,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-12-07T17:22:57,630 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 533037dcce0b4066fcb97082c19841df: 2024-12-07T17:22:57,630 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1733592175177.533037dcce0b4066fcb97082c19841df. for snaptb0-testExportWithTargetName completed. 2024-12-07T17:22:57,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1733592175177.533037dcce0b4066fcb97082c19841df.' 
region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-07T17:22:57,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:22:57,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithTargetName/533037dcce0b4066fcb97082c19841df/cf/5b7d9615c6764dad9de4c9911acda09b] hfiles 2024-12-07T17:22:57,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithTargetName/533037dcce0b4066fcb97082c19841df/cf/5b7d9615c6764dad9de4c9911acda09b for snapshot=snaptb0-testExportWithTargetName 2024-12-07T17:22:57,659 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithTargetName/fd33eca1e672f36235ceeba48f989a9d/cf/cf2c2dbd2540467783ab7720f3b1e448, entries=45, sequenceid=6, filesize=8.0 K 2024-12-07T17:22:57,661 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegion(3040): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for fd33eca1e672f36235ceeba48f989a9d in 334ms, sequenceid=6, compaction requested=false 2024-12-07T17:22:57,661 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegion(2538): Flush status journal for fd33eca1e672f36235ceeba48f989a9d: 2024-12-07T17:22:57,661 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1733592175177.fd33eca1e672f36235ceeba48f989a9d. for snaptb0-testExportWithTargetName completed. 2024-12-07T17:22:57,662 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1733592175177.fd33eca1e672f36235ceeba48f989a9d.' 
region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-07T17:22:57,662 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:22:57,662 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithTargetName/fd33eca1e672f36235ceeba48f989a9d/cf/cf2c2dbd2540467783ab7720f3b1e448] hfiles 2024-12-07T17:22:57,662 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithTargetName/fd33eca1e672f36235ceeba48f989a9d/cf/cf2c2dbd2540467783ab7720f3b1e448 for snapshot=snaptb0-testExportWithTargetName 2024-12-07T17:22:57,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741854_1030 (size=109) 2024-12-07T17:22:57,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741854_1030 (size=109) 2024-12-07T17:22:57,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741854_1030 (size=109) 2024-12-07T17:22:57,680 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1733592175177.533037dcce0b4066fcb97082c19841df. 
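Once the SnapshotProcedure a few entries below reaches SNAPSHOT_COMPLETE_SNAPSHOT and moves the manifest out of .hbase-snapshot/.tmp, the snapshot becomes visible to clients. A minimal sketch of confirming that from client code, assuming an open Admin handle as in the earlier sketch:

    import java.util.List;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public final class ListSnapshotsSketch {
      // Prints the completed snapshots known to the master; after the procedure
      // below finishes, snaptb0-testExportWithTargetName should appear here.
      public static void printSnapshots(Admin admin) throws Exception {
        List<SnapshotDescription> snapshots = admin.listSnapshots();
        for (SnapshotDescription sd : snapshots) {
          System.out.println(sd.getName() + " table=" + sd.getTableNameAsString());
        }
      }
    }
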
2024-12-07T17:22:57,681 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-07T17:22:57,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-07T17:22:57,681 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 533037dcce0b4066fcb97082c19841df 2024-12-07T17:22:57,682 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=21, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 533037dcce0b4066fcb97082c19841df 2024-12-07T17:22:57,686 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; SnapshotRegionProcedure 533037dcce0b4066fcb97082c19841df in 518 msec 2024-12-07T17:22:57,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741855_1031 (size=109) 2024-12-07T17:22:57,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741855_1031 (size=109) 2024-12-07T17:22:57,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741855_1031 (size=109) 2024-12-07T17:22:57,697 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1733592175177.fd33eca1e672f36235ceeba48f989a9d. 2024-12-07T17:22:57,698 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=22 2024-12-07T17:22:57,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=22 2024-12-07T17:22:57,698 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region fd33eca1e672f36235ceeba48f989a9d 2024-12-07T17:22:57,699 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=22, ppid=20, state=RUNNABLE; SnapshotRegionProcedure fd33eca1e672f36235ceeba48f989a9d 2024-12-07T17:22:57,704 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=22, resume processing ppid=20 2024-12-07T17:22:57,704 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, ppid=20, state=SUCCESS; SnapshotRegionProcedure fd33eca1e672f36235ceeba48f989a9d in 535 msec 2024-12-07T17:22:57,704 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T17:22:57,706 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 
execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T17:22:57,707 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T17:22:57,707 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-12-07T17:22:57,709 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-12-07T17:22:57,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-07T17:22:57,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741856_1032 (size=627) 2024-12-07T17:22:57,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741856_1032 (size=627) 2024-12-07T17:22:57,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741856_1032 (size=627) 2024-12-07T17:22:57,775 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T17:22:57,821 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T17:22:57,822 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-07T17:22:57,826 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T17:22:57,826 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 20 2024-12-07T17:22:57,831 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 710 msec 2024-12-07T17:22:58,232 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-07T17:22:58,233 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName, procId: 20 completed 2024-12-07T17:22:58,233 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592178233 2024-12-07T17:22:58,233 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:33765, tgtDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592178233, rawTgtDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592178233, srcFsUri=hdfs://localhost:33765, srcDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:22:58,299 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:33765, inputRoot=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:22:58,299 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1614647307_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592178233, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592178233/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-07T17:22:58,310 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 
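The ExportSnapshot entries above show the test resolving inputFs/inputRoot and outputFs/outputRoot before copying the snapshot to the export directory under the new name testExportWithTargetName. Outside of this test harness, the same tool is typically driven through ToolRunner (or the equivalent `hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot ...` command line); a sketch under that assumption, with an illustrative class name and a shortened placeholder destination path:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Copies the snapshot manifest and referenced hfiles to the target
        // filesystem, renaming the snapshot on the way (-target), as in the
        // export traced above.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportWithTargetName",
            "-target", "testExportWithTargetName",
            "-copy-to", "hdfs://localhost:33765/user/jenkins/export-test" // placeholder path
        });
        System.exit(rc);
      }
    }
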
2024-12-07T17:22:58,328 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592178233/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-07T17:22:58,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741857_1033 (size=627) 2024-12-07T17:22:58,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741857_1033 (size=627) 2024-12-07T17:22:58,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741857_1033 (size=627) 2024-12-07T17:22:58,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741858_1034 (size=162) 2024-12-07T17:22:58,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741858_1034 (size=162) 2024-12-07T17:22:58,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741858_1034 (size=162) 2024-12-07T17:22:58,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741859_1035 (size=154) 2024-12-07T17:22:58,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741859_1035 (size=154) 2024-12-07T17:22:58,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741859_1035 (size=154) 2024-12-07T17:22:58,499 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-07T17:22:58,500 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-07T17:22:58,500 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-07T17:22:58,501 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-07T17:22:59,684 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop-811202095989923169.jar 2024-12-07T17:22:59,685 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using 
jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-07T17:22:59,685 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-07T17:22:59,814 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop-5866931226681005420.jar 2024-12-07T17:22:59,815 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-07T17:22:59,816 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-07T17:22:59,816 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-07T17:22:59,817 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-07T17:22:59,818 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-07T17:22:59,818 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-07T17:22:59,819 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-07T17:22:59,819 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-07T17:22:59,820 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-07T17:22:59,821 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-07T17:22:59,821 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-07T17:22:59,822 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-07T17:22:59,822 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-07T17:22:59,823 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-07T17:22:59,824 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-07T17:22:59,824 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-07T17:22:59,825 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-07T17:22:59,825 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-07T17:22:59,829 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:22:59,830 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:22:59,830 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T17:22:59,831 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:22:59,831 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:22:59,832 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T17:22:59,832 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T17:23:00,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741860_1036 (size=127628) 2024-12-07T17:23:00,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741860_1036 (size=127628) 2024-12-07T17:23:00,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741860_1036 (size=127628) 2024-12-07T17:23:00,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741861_1037 (size=2172101) 2024-12-07T17:23:00,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741861_1037 (size=2172101) 2024-12-07T17:23:00,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741861_1037 (size=2172101) 2024-12-07T17:23:00,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741862_1038 (size=213228) 2024-12-07T17:23:00,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741862_1038 (size=213228) 2024-12-07T17:23:00,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741862_1038 (size=213228) 2024-12-07T17:23:00,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741863_1039 (size=1877034) 2024-12-07T17:23:00,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741863_1039 (size=1877034) 2024-12-07T17:23:00,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741863_1039 (size=1877034) 2024-12-07T17:23:00,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741864_1040 (size=533455) 
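The TableMapReduceUtil(923) entries above record, for each class the export job needs, which jar will be shipped with the job; the addStoredBlock entries that follow correspond to those jars being uploaded to HDFS for the distributed cache. In user code this resolution is normally triggered with TableMapReduceUtil.addDependencyJars; a minimal sketch, assuming a freshly created Job and an illustrative class name:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "dependency-jars-sketch");
        // Finds the jar backing each class the job depends on (HBase client,
        // protobuf, ZooKeeper, metrics, ...) and adds it to the job's classpath
        // via the distributed cache, producing "For class X, using jar Y" output
        // like the entries above.
        TableMapReduceUtil.addDependencyJars(job);
      }
    }
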
2024-12-07T17:23:00,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741864_1040 (size=533455) 2024-12-07T17:23:00,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741864_1040 (size=533455) 2024-12-07T17:23:00,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741865_1041 (size=7280644) 2024-12-07T17:23:00,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741865_1041 (size=7280644) 2024-12-07T17:23:00,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741865_1041 (size=7280644) 2024-12-07T17:23:00,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741866_1042 (size=4188619) 2024-12-07T17:23:00,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741866_1042 (size=4188619) 2024-12-07T17:23:00,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741866_1042 (size=4188619) 2024-12-07T17:23:00,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741867_1043 (size=20406) 2024-12-07T17:23:00,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741867_1043 (size=20406) 2024-12-07T17:23:00,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741867_1043 (size=20406) 2024-12-07T17:23:00,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741868_1044 (size=75495) 2024-12-07T17:23:00,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741868_1044 (size=75495) 2024-12-07T17:23:00,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741868_1044 (size=75495) 2024-12-07T17:23:00,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741869_1045 (size=45609) 2024-12-07T17:23:00,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741869_1045 (size=45609) 2024-12-07T17:23:00,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741869_1045 (size=45609) 2024-12-07T17:23:00,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741870_1046 (size=110084) 2024-12-07T17:23:00,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741870_1046 (size=110084) 2024-12-07T17:23:00,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741870_1046 
(size=110084) 2024-12-07T17:23:00,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741871_1047 (size=451756) 2024-12-07T17:23:00,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741871_1047 (size=451756) 2024-12-07T17:23:00,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741871_1047 (size=451756) 2024-12-07T17:23:00,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741872_1048 (size=6350154) 2024-12-07T17:23:00,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741872_1048 (size=6350154) 2024-12-07T17:23:00,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741872_1048 (size=6350154) 2024-12-07T17:23:00,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741873_1049 (size=1323991) 2024-12-07T17:23:00,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741873_1049 (size=1323991) 2024-12-07T17:23:00,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741873_1049 (size=1323991) 2024-12-07T17:23:00,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741874_1050 (size=23076) 2024-12-07T17:23:00,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741874_1050 (size=23076) 2024-12-07T17:23:00,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741874_1050 (size=23076) 2024-12-07T17:23:00,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741875_1051 (size=126803) 2024-12-07T17:23:00,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741875_1051 (size=126803) 2024-12-07T17:23:00,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741875_1051 (size=126803) 2024-12-07T17:23:00,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741876_1052 (size=322274) 2024-12-07T17:23:00,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741876_1052 (size=322274) 2024-12-07T17:23:00,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741876_1052 (size=322274) 2024-12-07T17:23:00,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741877_1053 (size=1832290) 2024-12-07T17:23:00,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to 
blk_1073741877_1053 (size=1832290) 2024-12-07T17:23:00,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741877_1053 (size=1832290) 2024-12-07T17:23:00,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741878_1054 (size=30081) 2024-12-07T17:23:00,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741878_1054 (size=30081) 2024-12-07T17:23:00,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741878_1054 (size=30081) 2024-12-07T17:23:01,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741879_1055 (size=53616) 2024-12-07T17:23:01,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741879_1055 (size=53616) 2024-12-07T17:23:01,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741879_1055 (size=53616) 2024-12-07T17:23:01,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741880_1056 (size=29229) 2024-12-07T17:23:01,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741880_1056 (size=29229) 2024-12-07T17:23:01,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741880_1056 (size=29229) 2024-12-07T17:23:01,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741881_1057 (size=169089) 2024-12-07T17:23:01,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741881_1057 (size=169089) 2024-12-07T17:23:01,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741881_1057 (size=169089) 2024-12-07T17:23:01,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741882_1058 (size=5175431) 2024-12-07T17:23:01,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741882_1058 (size=5175431) 2024-12-07T17:23:01,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741882_1058 (size=5175431) 2024-12-07T17:23:01,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741883_1059 (size=136454) 2024-12-07T17:23:01,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741883_1059 (size=136454) 2024-12-07T17:23:01,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741883_1059 (size=136454) 2024-12-07T17:23:01,557 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried 
hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T17:23:01,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741884_1060 (size=907853) 2024-12-07T17:23:01,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741884_1060 (size=907853) 2024-12-07T17:23:01,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741884_1060 (size=907853) 2024-12-07T17:23:01,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741885_1061 (size=3317408) 2024-12-07T17:23:01,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741885_1061 (size=3317408) 2024-12-07T17:23:01,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741885_1061 (size=3317408) 2024-12-07T17:23:01,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741886_1062 (size=503880) 2024-12-07T17:23:01,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741886_1062 (size=503880) 2024-12-07T17:23:01,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741886_1062 (size=503880) 2024-12-07T17:23:01,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741887_1063 (size=4695811) 2024-12-07T17:23:01,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741887_1063 (size=4695811) 2024-12-07T17:23:01,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741887_1063 (size=4695811) 2024-12-07T17:23:01,908 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
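The JobResourceUploader(481) warning above ("No job jar file set") appears benign in this run, since the dependency jars were shipped explicitly, but in an application that submits its own MapReduce job it is usually silenced by pointing the job at the jar containing the driver class, as the warning's reference to Job#setJar(String) suggests. A sketch with a hypothetical driver class name:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    public class ExportDriver {
      public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "export-driver-sketch");
        // Ships the jar that contains ExportDriver with the job so user classes
        // are found on the cluster; avoids the "No job jar file set" warning.
        job.setJarByClass(ExportDriver.class);
      }
    }
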
2024-12-07T17:23:01,925 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-12-07T17:23:01,943 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-07T17:23:01,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741888_1064 (size=342) 2024-12-07T17:23:02,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741888_1064 (size=342) 2024-12-07T17:23:02,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741888_1064 (size=342) 2024-12-07T17:23:02,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741889_1065 (size=15) 2024-12-07T17:23:02,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741889_1065 (size=15) 2024-12-07T17:23:02,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741889_1065 (size=15) 2024-12-07T17:23:02,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741890_1066 (size=304884) 2024-12-07T17:23:02,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741890_1066 (size=304884) 2024-12-07T17:23:02,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741890_1066 (size=304884) 2024-12-07T17:23:02,622 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-07T17:23:02,622 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-07T17:23:02,780 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0001_000001 (auth:SIMPLE) from 127.0.0.1:53416 2024-12-07T17:23:04,860 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-07T17:23:04,860 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-12-07T17:23:11,672 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0001_000001 (auth:SIMPLE) from 127.0.0.1:52412 2024-12-07T17:23:12,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741891_1067 (size=350558) 2024-12-07T17:23:12,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741891_1067 (size=350558) 2024-12-07T17:23:12,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741891_1067 (size=350558) 2024-12-07T17:23:13,249 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-07T17:23:14,010 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0001_000001 (auth:SIMPLE) from 127.0.0.1:46424 2024-12-07T17:23:19,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741892_1068 (size=8188) 2024-12-07T17:23:19,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741892_1068 (size=8188) 2024-12-07T17:23:19,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741892_1068 (size=8188) 2024-12-07T17:23:19,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741893_1069 (size=5422) 2024-12-07T17:23:19,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741893_1069 (size=5422) 2024-12-07T17:23:19,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741893_1069 (size=5422) 2024-12-07T17:23:19,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741894_1070 (size=17419) 2024-12-07T17:23:19,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741894_1070 (size=17419) 2024-12-07T17:23:19,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741894_1070 (size=17419) 2024-12-07T17:23:19,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:33165 is added to blk_1073741895_1071 (size=464) 2024-12-07T17:23:19,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741895_1071 (size=464) 2024-12-07T17:23:19,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741895_1071 (size=464) 2024-12-07T17:23:19,405 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-1_1/usercache/jenkins/appcache/application_1733592173099_0001/container_1733592173099_0001_01_000002/launch_container.sh] 2024-12-07T17:23:19,406 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-1_1/usercache/jenkins/appcache/application_1733592173099_0001/container_1733592173099_0001_01_000002/container_tokens] 2024-12-07T17:23:19,406 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-1_1/usercache/jenkins/appcache/application_1733592173099_0001/container_1733592173099_0001_01_000002/sysfs] 2024-12-07T17:23:19,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741896_1072 (size=17419) 2024-12-07T17:23:19,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741896_1072 (size=17419) 2024-12-07T17:23:19,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741896_1072 (size=17419) 2024-12-07T17:23:19,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741897_1073 (size=350558) 2024-12-07T17:23:19,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741897_1073 (size=350558) 2024-12-07T17:23:19,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741897_1073 (size=350558) 2024-12-07T17:23:19,477 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0001_000001 (auth:SIMPLE) from 127.0.0.1:46432 2024-12-07T17:23:19,951 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T17:23:19,953 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50328, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), 
service=RegionServerStatusService 2024-12-07T17:23:20,938 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T17:23:20,939 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50344, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T17:23:20,941 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T17:23:20,943 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50358, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T17:23:21,120 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-07T17:23:21,122 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-07T17:23:21,145 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: testExportWithTargetName 2024-12-07T17:23:21,145 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-07T17:23:21,147 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-07T17:23:21,147 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1614647307_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-07T17:23:21,148 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo 2024-12-07T17:23:21,148 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest 2024-12-07T17:23:21,148 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1614647307_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592178233/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592178233/.hbase-snapshot/testExportWithTargetName 2024-12-07T17:23:21,149 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592178233/.hbase-snapshot/testExportWithTargetName/.snapshotinfo 2024-12-07T17:23:21,149 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592178233/.hbase-snapshot/testExportWithTargetName/data.manifest 2024-12-07T17:23:21,176 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportWithTargetName 2024-12-07T17:23:21,180 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName 2024-12-07T17:23:21,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=23, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportWithTargetName 2024-12-07T17:23:21,194 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592201193"}]},"ts":"1733592201193"} 2024-12-07T17:23:21,197 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta 2024-12-07T17:23:21,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=23 2024-12-07T17:23:21,200 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithTargetName to state=DISABLING 2024-12-07T17:23:21,203 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=24, ppid=23, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-12-07T17:23:21,209 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=533037dcce0b4066fcb97082c19841df, UNASSIGN}, {pid=26, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=fd33eca1e672f36235ceeba48f989a9d, UNASSIGN}] 2024-12-07T17:23:21,211 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=26, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=fd33eca1e672f36235ceeba48f989a9d, UNASSIGN 2024-12-07T17:23:21,211 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=25, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=533037dcce0b4066fcb97082c19841df, UNASSIGN 2024-12-07T17:23:21,213 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=26 updating hbase:meta row=fd33eca1e672f36235ceeba48f989a9d, regionState=CLOSING, regionLocation=5c70517e8267,41313,1733592165267 2024-12-07T17:23:21,213 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=25 updating hbase:meta row=533037dcce0b4066fcb97082c19841df, regionState=CLOSING, regionLocation=5c70517e8267,38423,1733592165361 2024-12-07T17:23:21,216 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T17:23:21,216 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; CloseRegionProcedure fd33eca1e672f36235ceeba48f989a9d, server=5c70517e8267,41313,1733592165267}] 2024-12-07T17:23:21,221 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T17:23:21,221 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=28, ppid=25, state=RUNNABLE; CloseRegionProcedure 
533037dcce0b4066fcb97082c19841df, server=5c70517e8267,38423,1733592165361}] 2024-12-07T17:23:21,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=23 2024-12-07T17:23:21,375 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5c70517e8267,41313,1733592165267 2024-12-07T17:23:21,375 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5c70517e8267,38423,1733592165361 2024-12-07T17:23:21,378 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] handler.UnassignRegionHandler(124): Close 533037dcce0b4066fcb97082c19841df 2024-12-07T17:23:21,378 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] handler.UnassignRegionHandler(124): Close fd33eca1e672f36235ceeba48f989a9d 2024-12-07T17:23:21,378 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T17:23:21,378 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T17:23:21,379 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1681): Closing 533037dcce0b4066fcb97082c19841df, disabling compactions & flushes 2024-12-07T17:23:21,379 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,,1733592175177.533037dcce0b4066fcb97082c19841df. 2024-12-07T17:23:21,379 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,,1733592175177.533037dcce0b4066fcb97082c19841df. 2024-12-07T17:23:21,379 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,,1733592175177.533037dcce0b4066fcb97082c19841df. after waiting 0 ms 2024-12-07T17:23:21,379 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,,1733592175177.533037dcce0b4066fcb97082c19841df. 2024-12-07T17:23:21,380 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1681): Closing fd33eca1e672f36235ceeba48f989a9d, disabling compactions & flushes 2024-12-07T17:23:21,380 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,1,1733592175177.fd33eca1e672f36235ceeba48f989a9d. 2024-12-07T17:23:21,380 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,1,1733592175177.fd33eca1e672f36235ceeba48f989a9d. 
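The entries above show the master storing DisableTableProcedure pid=23 and dispatching CloseRegionProcedures to both region servers after a client call ("Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName"). A minimal sketch of that client-side step with the HBase client Admin API is below; it is not the test's actual code, and the ZooKeeper connection settings are assumptions lifted from the quorum string that appears later in this log.

    // Sketch only: disable the table via the Admin API, which is what triggers the
    // DisableTableProcedure and the CloseRegionProcedures logged above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumed connection details; the quorum/port values appear in the ZKWatcher lines below.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "58133");
        TableName table = TableName.valueOf("testtb-testExportWithTargetName");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.disableTable(table); // blocks until the DisableTableProcedure completes
          System.out.println("disabled: " + admin.isTableDisabled(table));
        }
      }
    }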
2024-12-07T17:23:21,380 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,1,1733592175177.fd33eca1e672f36235ceeba48f989a9d. after waiting 0 ms 2024-12-07T17:23:21,380 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,1,1733592175177.fd33eca1e672f36235ceeba48f989a9d. 2024-12-07T17:23:21,392 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithTargetName/533037dcce0b4066fcb97082c19841df/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T17:23:21,393 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithTargetName/fd33eca1e672f36235ceeba48f989a9d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T17:23:21,397 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:23:21,397 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:23:21,397 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,1,1733592175177.fd33eca1e672f36235ceeba48f989a9d. 2024-12-07T17:23:21,397 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,,1733592175177.533037dcce0b4066fcb97082c19841df. 
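Earlier in this run the TestExportSnapshot(448)/(453) lines walk both the source and the exported snapshot directories and report the .snapshotinfo and data.manifest files. The following is a minimal sketch of such a recursive listing with the Hadoop FileSystem API; the NameNode URI and the export path are copied from the log, everything else (class name, configuration) is an assumption for illustration.

    // Sketch only: recursively list the exported snapshot directory, roughly what the
    // TestExportSnapshot(448/453) entries above report.
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.LocatedFileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;

    public class ListSnapshotFilesSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:33765"), conf);
        Path exported = new Path("/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/"
            + "export-test/export-1733592178233/.hbase-snapshot/testExportWithTargetName");
        RemoteIterator<LocatedFileStatus> it = fs.listFiles(exported, true); // recursive listing
        while (it.hasNext()) {
          System.out.println(it.next().getPath()); // expect .snapshotinfo and data.manifest
        }
      }
    }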
2024-12-07T17:23:21,397 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1635): Region close journal for fd33eca1e672f36235ceeba48f989a9d: 2024-12-07T17:23:21,397 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1635): Region close journal for 533037dcce0b4066fcb97082c19841df: 2024-12-07T17:23:21,406 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] handler.UnassignRegionHandler(170): Closed fd33eca1e672f36235ceeba48f989a9d 2024-12-07T17:23:21,410 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=26 updating hbase:meta row=fd33eca1e672f36235ceeba48f989a9d, regionState=CLOSED 2024-12-07T17:23:21,410 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] handler.UnassignRegionHandler(170): Closed 533037dcce0b4066fcb97082c19841df 2024-12-07T17:23:21,411 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=25 updating hbase:meta row=533037dcce0b4066fcb97082c19841df, regionState=CLOSED 2024-12-07T17:23:21,418 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-12-07T17:23:21,419 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; CloseRegionProcedure fd33eca1e672f36235ceeba48f989a9d, server=5c70517e8267,41313,1733592165267 in 199 msec 2024-12-07T17:23:21,420 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, ppid=24, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=fd33eca1e672f36235ceeba48f989a9d, UNASSIGN in 209 msec 2024-12-07T17:23:21,420 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=28, resume processing ppid=25 2024-12-07T17:23:21,421 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, ppid=25, state=SUCCESS; CloseRegionProcedure 533037dcce0b4066fcb97082c19841df, server=5c70517e8267,38423,1733592165361 in 196 msec 2024-12-07T17:23:21,423 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-12-07T17:23:21,423 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=533037dcce0b4066fcb97082c19841df, UNASSIGN in 211 msec 2024-12-07T17:23:21,427 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=24, resume processing ppid=23 2024-12-07T17:23:21,427 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, ppid=23, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 222 msec 2024-12-07T17:23:21,430 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592201429"}]},"ts":"1733592201429"} 2024-12-07T17:23:21,432 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-12-07T17:23:21,434 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 2024-12-07T17:23:21,437 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, state=SUCCESS; DisableTableProcedure table=testtb-testExportWithTargetName in 253 msec 
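With the table marked DISABLED in hbase:meta, the entries that follow show the client dropping it (DeleteTableProcedure pid=29). A hedged sketch of that client-side step is below; the connection setup is assumed, and the comment describes what the subsequent log lines show the procedure doing rather than guaranteed API behavior.

    // Sketch only: drop the now-disabled table. Admin.deleteTable blocks until the
    // DeleteTableProcedure finishes (archiving region data, removing the ACL znode,
    // and deleting the META rows, as the entries below show).
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DeleteTableSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportWithTargetName");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          if (admin.isTableDisabled(table)) { // disabled in the previous step
            admin.deleteTable(table);
          }
        }
      }
    }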
2024-12-07T17:23:21,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=23 2024-12-07T17:23:21,501 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName, procId: 23 completed 2024-12-07T17:23:21,504 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportWithTargetName 2024-12-07T17:23:21,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=29, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-07T17:23:21,510 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=29, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-07T17:23:21,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportWithTargetName 2024-12-07T17:23:21,512 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=29, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-07T17:23:21,515 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36163 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName 2024-12-07T17:23:21,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-07T17:23:21,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-07T17:23:21,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-07T17:23:21,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-07T17:23:21,520 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-07T17:23:21,521 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-07T17:23:21,521 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-07T17:23:21,521 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithTargetName/533037dcce0b4066fcb97082c19841df 2024-12-07T17:23:21,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-07T17:23:21,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-07T17:23:21,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:23:21,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:23:21,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:23:21,523 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data null 2024-12-07T17:23:21,523 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-07T17:23:21,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-07T17:23:21,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:23:21,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=29 2024-12-07T17:23:21,527 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithTargetName/533037dcce0b4066fcb97082c19841df/cf, FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithTargetName/533037dcce0b4066fcb97082c19841df/recovered.edits] 2024-12-07T17:23:21,528 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithTargetName/fd33eca1e672f36235ceeba48f989a9d 2024-12-07T17:23:21,531 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithTargetName/fd33eca1e672f36235ceeba48f989a9d/cf, FileablePath, 
hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithTargetName/fd33eca1e672f36235ceeba48f989a9d/recovered.edits] 2024-12-07T17:23:21,536 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithTargetName/533037dcce0b4066fcb97082c19841df/cf/5b7d9615c6764dad9de4c9911acda09b to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportWithTargetName/533037dcce0b4066fcb97082c19841df/cf/5b7d9615c6764dad9de4c9911acda09b 2024-12-07T17:23:21,536 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithTargetName/fd33eca1e672f36235ceeba48f989a9d/cf/cf2c2dbd2540467783ab7720f3b1e448 to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportWithTargetName/fd33eca1e672f36235ceeba48f989a9d/cf/cf2c2dbd2540467783ab7720f3b1e448 2024-12-07T17:23:21,542 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithTargetName/533037dcce0b4066fcb97082c19841df/recovered.edits/9.seqid to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportWithTargetName/533037dcce0b4066fcb97082c19841df/recovered.edits/9.seqid 2024-12-07T17:23:21,542 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithTargetName/fd33eca1e672f36235ceeba48f989a9d/recovered.edits/9.seqid to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportWithTargetName/fd33eca1e672f36235ceeba48f989a9d/recovered.edits/9.seqid 2024-12-07T17:23:21,543 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithTargetName/533037dcce0b4066fcb97082c19841df 2024-12-07T17:23:21,543 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithTargetName/fd33eca1e672f36235ceeba48f989a9d 2024-12-07T17:23:21,543 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions 2024-12-07T17:23:21,547 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=29, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-07T17:23:21,553 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38423 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-07T17:23:21,556 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from hbase:meta 2024-12-07T17:23:21,560 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithTargetName' descriptor. 
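Once the region data has been archived and the table descriptor removed, the MasterRpcServices/SnapshotManager entries further down show the test deleting the two snapshots it created. A minimal sketch of that cleanup via the Admin API follows; the snapshot names come from the log, the connection details are assumed.

    // Sketch only: delete the snapshots created by the test (names taken from the log).
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DeleteSnapshotsSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.deleteSnapshot("emptySnaptb0-testExportWithTargetName");
          admin.deleteSnapshot("snaptb0-testExportWithTargetName");
        }
      }
    }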
2024-12-07T17:23:21,562 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=29, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-07T17:23:21,562 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithTargetName' from region states. 2024-12-07T17:23:21,563 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1733592175177.533037dcce0b4066fcb97082c19841df.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733592201562"}]},"ts":"9223372036854775807"} 2024-12-07T17:23:21,563 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1733592175177.fd33eca1e672f36235ceeba48f989a9d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733592201562"}]},"ts":"9223372036854775807"} 2024-12-07T17:23:21,566 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-07T17:23:21,566 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 533037dcce0b4066fcb97082c19841df, NAME => 'testtb-testExportWithTargetName,,1733592175177.533037dcce0b4066fcb97082c19841df.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => fd33eca1e672f36235ceeba48f989a9d, NAME => 'testtb-testExportWithTargetName,1,1733592175177.fd33eca1e672f36235ceeba48f989a9d.', STARTKEY => '1', ENDKEY => ''}] 2024-12-07T17:23:21,566 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithTargetName' as deleted. 2024-12-07T17:23:21,567 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733592201566"}]},"ts":"9223372036854775807"} 2024-12-07T17:23:21,569 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportWithTargetName state from META 2024-12-07T17:23:21,573 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=29, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-07T17:23:21,575 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, state=SUCCESS; DeleteTableProcedure table=testtb-testExportWithTargetName in 68 msec 2024-12-07T17:23:21,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=29 2024-12-07T17:23:21,628 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName, procId: 29 completed 2024-12-07T17:23:21,646 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithTargetName" 2024-12-07T17:23:21,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportWithTargetName 2024-12-07T17:23:21,652 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithTargetName" 2024-12-07T17:23:21,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(380): Deleting snapshot: 
snaptb0-testExportWithTargetName 2024-12-07T17:23:21,690 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=774 (was 719) Potentially hanging thread: RS_OPEN_REGION-regionserver/5c70517e8267:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 18623) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: htable-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1173471231) connection to localhost/127.0.0.1:43173 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44421 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1173471231) connection to localhost/127.0.0.1:33183 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ApplicationMasterLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1614647307_22 at /127.0.0.1:56958 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: htable-pool-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34951 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xeb8403e-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33183 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xeb8403e-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36619 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1173471231) connection to localhost/127.0.0.1:44421 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Thread-1294 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: RS-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1614647307_22 at /127.0.0.1:59048 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xeb8403e-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Container metrics unregistration java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1614647307_22 at /127.0.0.1:37312 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-101113162_1 at /127.0.0.1:59036 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-101113162_1 at /127.0.0.1:56940 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xeb8403e-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43173 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/5c70517e8267:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=797 (was 763) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=539 (was 481) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 11) - ProcessCount LEAK? 
-, AvailableMemoryMB=5659 (was 7323) 2024-12-07T17:23:21,690 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=774 is superior to 500 2024-12-07T17:23:21,715 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=774, OpenFileDescriptor=797, MaxFileDescriptor=1048576, SystemLoadAverage=539, ProcessCount=17, AvailableMemoryMB=5652 2024-12-07T17:23:21,715 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=774 is superior to 500 2024-12-07T17:23:21,717 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T17:23:21,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportWithResetTtl 2024-12-07T17:23:21,721 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T17:23:21,721 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:23:21,721 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithResetTtl" procId is: 30 2024-12-07T17:23:21,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-07T17:23:21,722 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T17:23:21,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741898_1074 (size=404) 2024-12-07T17:23:21,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741898_1074 (size=404) 2024-12-07T17:23:21,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741898_1074 (size=404) 2024-12-07T17:23:21,750 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => a1332a7519281371dde400bf41911c38, NAME => 'testtb-testExportWithResetTtl,1,1733592201717.a1332a7519281371dde400bf41911c38.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL 
=> 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:23:21,750 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => b015e76782accb7dfc05754188145804, NAME => 'testtb-testExportWithResetTtl,,1733592201717.b015e76782accb7dfc05754188145804.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:23:21,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741900_1076 (size=65) 2024-12-07T17:23:21,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741900_1076 (size=65) 2024-12-07T17:23:21,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741900_1076 (size=65) 2024-12-07T17:23:21,778 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,,1733592201717.b015e76782accb7dfc05754188145804.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:23:21,778 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1681): Closing b015e76782accb7dfc05754188145804, disabling compactions & flushes 2024-12-07T17:23:21,778 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,,1733592201717.b015e76782accb7dfc05754188145804. 2024-12-07T17:23:21,778 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,,1733592201717.b015e76782accb7dfc05754188145804. 2024-12-07T17:23:21,778 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,,1733592201717.b015e76782accb7dfc05754188145804. after waiting 0 ms 2024-12-07T17:23:21,778 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,,1733592201717.b015e76782accb7dfc05754188145804. 2024-12-07T17:23:21,778 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,,1733592201717.b015e76782accb7dfc05754188145804. 
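(The create request logged above spells out the full table layout: a single column family 'cf' with VERSIONS => '1', TTL => 'FOREVER', no compression or encoding, 64 KB blocks, and two regions pre-split at row key '1'. Purely as an illustrative sketch, not the test's own code, the same table could be created with the standard HBase 2.x client API roughly as follows; the class name, connection setup, and variable names are assumptions, while the table name, family name, and split key are taken from the log.)

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateExportTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // picks up hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
      // One family 'cf' with the attributes shown in the log: a single version,
      // default (unlimited) TTL, 64 KB block size, block cache left enabled.
      TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(table)
          .setRegionReplication(1)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .setBlocksize(65536)
              .build());
      // Pre-split at row key '1', giving the two regions ('' -> '1' and '1' -> '')
      // whose initialization is logged by the RegionOpenAndInit pool above.
      admin.createTable(builder.build(), new byte[][] { Bytes.toBytes("1") });
    }
  }
}

(The CreateTableProcedure records that follow, pid=30 with its ASSIGN sub-procedures, are the master-side execution of exactly this kind of request.)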
2024-12-07T17:23:21,778 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1635): Region close journal for b015e76782accb7dfc05754188145804: 2024-12-07T17:23:21,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741899_1075 (size=65) 2024-12-07T17:23:21,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741899_1075 (size=65) 2024-12-07T17:23:21,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741899_1075 (size=65) 2024-12-07T17:23:21,790 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,1,1733592201717.a1332a7519281371dde400bf41911c38.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:23:21,790 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1681): Closing a1332a7519281371dde400bf41911c38, disabling compactions & flushes 2024-12-07T17:23:21,790 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,1,1733592201717.a1332a7519281371dde400bf41911c38. 2024-12-07T17:23:21,791 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,1,1733592201717.a1332a7519281371dde400bf41911c38. 2024-12-07T17:23:21,791 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,1,1733592201717.a1332a7519281371dde400bf41911c38. after waiting 0 ms 2024-12-07T17:23:21,791 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,1,1733592201717.a1332a7519281371dde400bf41911c38. 2024-12-07T17:23:21,791 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,1,1733592201717.a1332a7519281371dde400bf41911c38. 
2024-12-07T17:23:21,791 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1635): Region close journal for a1332a7519281371dde400bf41911c38: 2024-12-07T17:23:21,793 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T17:23:21,793 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1733592201717.b015e76782accb7dfc05754188145804.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733592201793"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733592201793"}]},"ts":"1733592201793"} 2024-12-07T17:23:21,793 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1733592201717.a1332a7519281371dde400bf41911c38.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733592201793"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733592201793"}]},"ts":"1733592201793"} 2024-12-07T17:23:21,797 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-07T17:23:21,798 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T17:23:21,798 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592201798"}]},"ts":"1733592201798"} 2024-12-07T17:23:21,800 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-07T17:23:21,805 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(202): Hosts are {5c70517e8267=0} racks are {/default-rack=0} 2024-12-07T17:23:21,807 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-07T17:23:21,807 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-07T17:23:21,807 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-07T17:23:21,807 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-07T17:23:21,807 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-07T17:23:21,807 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-07T17:23:21,807 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T17:23:21,807 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=b015e76782accb7dfc05754188145804, ASSIGN}, {pid=32, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=a1332a7519281371dde400bf41911c38, ASSIGN}] 2024-12-07T17:23:21,809 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=32, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, 
region=a1332a7519281371dde400bf41911c38, ASSIGN 2024-12-07T17:23:21,810 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=31, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=b015e76782accb7dfc05754188145804, ASSIGN 2024-12-07T17:23:21,811 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=32, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=a1332a7519281371dde400bf41911c38, ASSIGN; state=OFFLINE, location=5c70517e8267,38423,1733592165361; forceNewPlan=false, retain=false 2024-12-07T17:23:21,812 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=31, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=b015e76782accb7dfc05754188145804, ASSIGN; state=OFFLINE, location=5c70517e8267,36163,1733592165482; forceNewPlan=false, retain=false 2024-12-07T17:23:21,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-07T17:23:21,961 INFO [5c70517e8267:36895 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-07T17:23:21,962 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=31 updating hbase:meta row=b015e76782accb7dfc05754188145804, regionState=OPENING, regionLocation=5c70517e8267,36163,1733592165482 2024-12-07T17:23:21,962 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=a1332a7519281371dde400bf41911c38, regionState=OPENING, regionLocation=5c70517e8267,38423,1733592165361 2024-12-07T17:23:21,965 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=31, state=RUNNABLE; OpenRegionProcedure b015e76782accb7dfc05754188145804, server=5c70517e8267,36163,1733592165482}] 2024-12-07T17:23:21,970 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=34, ppid=32, state=RUNNABLE; OpenRegionProcedure a1332a7519281371dde400bf41911c38, server=5c70517e8267,38423,1733592165361}] 2024-12-07T17:23:22,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-07T17:23:22,118 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5c70517e8267,36163,1733592165482 2024-12-07T17:23:22,122 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] handler.AssignRegionHandler(135): Open testtb-testExportWithResetTtl,,1733592201717.b015e76782accb7dfc05754188145804. 
2024-12-07T17:23:22,123 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7285): Opening region: {ENCODED => b015e76782accb7dfc05754188145804, NAME => 'testtb-testExportWithResetTtl,,1733592201717.b015e76782accb7dfc05754188145804.', STARTKEY => '', ENDKEY => '1'} 2024-12-07T17:23:22,123 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5c70517e8267,38423,1733592165361 2024-12-07T17:23:22,123 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1733592201717.b015e76782accb7dfc05754188145804. service=AccessControlService 2024-12-07T17:23:22,123 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T17:23:22,123 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl b015e76782accb7dfc05754188145804 2024-12-07T17:23:22,124 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,,1733592201717.b015e76782accb7dfc05754188145804.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:23:22,124 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7327): checking encryption for b015e76782accb7dfc05754188145804 2024-12-07T17:23:22,124 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7330): checking classloading for b015e76782accb7dfc05754188145804 2024-12-07T17:23:22,126 INFO [StoreOpener-b015e76782accb7dfc05754188145804-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b015e76782accb7dfc05754188145804 2024-12-07T17:23:22,126 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] handler.AssignRegionHandler(135): Open testtb-testExportWithResetTtl,1,1733592201717.a1332a7519281371dde400bf41911c38. 2024-12-07T17:23:22,127 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7285): Opening region: {ENCODED => a1332a7519281371dde400bf41911c38, NAME => 'testtb-testExportWithResetTtl,1,1733592201717.a1332a7519281371dde400bf41911c38.', STARTKEY => '1', ENDKEY => ''} 2024-12-07T17:23:22,127 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1733592201717.a1332a7519281371dde400bf41911c38. service=AccessControlService 2024-12-07T17:23:22,128 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-07T17:23:22,128 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl a1332a7519281371dde400bf41911c38 2024-12-07T17:23:22,128 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,1,1733592201717.a1332a7519281371dde400bf41911c38.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:23:22,128 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7327): checking encryption for a1332a7519281371dde400bf41911c38 2024-12-07T17:23:22,128 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7330): checking classloading for a1332a7519281371dde400bf41911c38 2024-12-07T17:23:22,129 INFO [StoreOpener-b015e76782accb7dfc05754188145804-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b015e76782accb7dfc05754188145804 columnFamilyName cf 2024-12-07T17:23:22,129 DEBUG [StoreOpener-b015e76782accb7dfc05754188145804-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:23:22,129 INFO [StoreOpener-a1332a7519281371dde400bf41911c38-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a1332a7519281371dde400bf41911c38 2024-12-07T17:23:22,130 INFO [StoreOpener-b015e76782accb7dfc05754188145804-1 {}] regionserver.HStore(327): Store=b015e76782accb7dfc05754188145804/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:23:22,131 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithResetTtl/b015e76782accb7dfc05754188145804 2024-12-07T17:23:22,131 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithResetTtl/b015e76782accb7dfc05754188145804 2024-12-07T17:23:22,132 INFO [StoreOpener-a1332a7519281371dde400bf41911c38-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a1332a7519281371dde400bf41911c38 columnFamilyName cf 2024-12-07T17:23:22,132 DEBUG [StoreOpener-a1332a7519281371dde400bf41911c38-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:23:22,133 INFO [StoreOpener-a1332a7519281371dde400bf41911c38-1 {}] regionserver.HStore(327): Store=a1332a7519281371dde400bf41911c38/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:23:22,133 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithResetTtl/a1332a7519281371dde400bf41911c38 2024-12-07T17:23:22,134 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1085): writing seq id for b015e76782accb7dfc05754188145804 2024-12-07T17:23:22,134 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithResetTtl/a1332a7519281371dde400bf41911c38 2024-12-07T17:23:22,136 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(1085): writing seq id for a1332a7519281371dde400bf41911c38 2024-12-07T17:23:22,136 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithResetTtl/b015e76782accb7dfc05754188145804/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T17:23:22,137 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1102): Opened b015e76782accb7dfc05754188145804; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67012454, jitterRate=-0.0014366209506988525}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T17:23:22,138 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1001): Region open journal for b015e76782accb7dfc05754188145804: 2024-12-07T17:23:22,139 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithResetTtl/a1332a7519281371dde400bf41911c38/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T17:23:22,140 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(1102): Opened a1332a7519281371dde400bf41911c38; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64934621, jitterRate=-0.03239874541759491}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T17:23:22,140 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithResetTtl,,1733592201717.b015e76782accb7dfc05754188145804., pid=33, masterSystemTime=1733592202118 2024-12-07T17:23:22,140 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(1001): Region open journal for a1332a7519281371dde400bf41911c38: 2024-12-07T17:23:22,141 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithResetTtl,1,1733592201717.a1332a7519281371dde400bf41911c38., pid=34, masterSystemTime=1733592202123 2024-12-07T17:23:22,142 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithResetTtl,,1733592201717.b015e76782accb7dfc05754188145804. 2024-12-07T17:23:22,142 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] handler.AssignRegionHandler(164): Opened testtb-testExportWithResetTtl,,1733592201717.b015e76782accb7dfc05754188145804. 2024-12-07T17:23:22,143 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=31 updating hbase:meta row=b015e76782accb7dfc05754188145804, regionState=OPEN, openSeqNum=2, regionLocation=5c70517e8267,36163,1733592165482 2024-12-07T17:23:22,143 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithResetTtl,1,1733592201717.a1332a7519281371dde400bf41911c38. 2024-12-07T17:23:22,143 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] handler.AssignRegionHandler(164): Opened testtb-testExportWithResetTtl,1,1733592201717.a1332a7519281371dde400bf41911c38. 
2024-12-07T17:23:22,144 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=a1332a7519281371dde400bf41911c38, regionState=OPEN, openSeqNum=2, regionLocation=5c70517e8267,38423,1733592165361 2024-12-07T17:23:22,147 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=31 2024-12-07T17:23:22,148 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=31, state=SUCCESS; OpenRegionProcedure b015e76782accb7dfc05754188145804, server=5c70517e8267,36163,1733592165482 in 180 msec 2024-12-07T17:23:22,149 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=34, resume processing ppid=32 2024-12-07T17:23:22,149 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, ppid=32, state=SUCCESS; OpenRegionProcedure a1332a7519281371dde400bf41911c38, server=5c70517e8267,38423,1733592165361 in 176 msec 2024-12-07T17:23:22,149 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=b015e76782accb7dfc05754188145804, ASSIGN in 341 msec 2024-12-07T17:23:22,151 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=32, resume processing ppid=30 2024-12-07T17:23:22,151 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, ppid=30, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=a1332a7519281371dde400bf41911c38, ASSIGN in 342 msec 2024-12-07T17:23:22,152 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T17:23:22,152 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592202152"}]},"ts":"1733592202152"} 2024-12-07T17:23:22,153 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-07T17:23:22,156 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T17:23:22,157 DEBUG [PEWorker-1 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-12-07T17:23:22,159 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36163 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-07T17:23:22,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:23:22,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:23:22,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:23:22,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:23:22,163 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-07T17:23:22,163 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-07T17:23:22,164 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-07T17:23:22,164 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-07T17:23:22,165 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; CreateTableProcedure table=testtb-testExportWithResetTtl in 446 msec 2024-12-07T17:23:22,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-07T17:23:22,328 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl, procId: 30 completed 2024-12-07T17:23:22,328 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportWithResetTtl get assigned. Timeout = 60000ms 2024-12-07T17:23:22,328 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:23:22,333 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportWithResetTtl assigned to meta. Checking AM states. 2024-12-07T17:23:22,333 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:23:22,333 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportWithResetTtl assigned. 2024-12-07T17:23:22,336 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-07T17:23:22,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733592202337 (current time:1733592202337). 
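(The master has just accepted a snapshot request for { ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, issued after the test confirmed that every region of the table was assigned. As a rough client-side equivalent only, assuming the standard Admin API and made-up class and variable names, the same two steps look like this.)

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeEmptySnapshot {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
      // The test utility waits up to 60 s for all regions to be assigned; a plain
      // client can poll table availability instead.
      while (!admin.isTableAvailable(table)) {
        Thread.sleep(100);
      }
      // FLUSH-type snapshot matching "type=FLUSH ttl=0" in the request above;
      // ttl=0 is the default and means the snapshot does not expire on its own.
      admin.snapshot("emptySnaptb0-testExportWithResetTtl", table, SnapshotType.FLUSH);
    }
  }
}

(The SnapshotProcedure records that follow, pid=35 fanning out into SnapshotRegionProcedure pid=36/37, are the server-side execution of this request; since nothing has been written to the table yet, each region reports an empty flush and "[] hfiles" in its manifest.)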
2024-12-07T17:23:22,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T17:23:22,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-07T17:23:22,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T17:23:22,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b6c53e7 to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@78180e2d 2024-12-07T17:23:22,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b08984a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:23:22,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:23:22,345 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36184, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:23:22,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b6c53e7 to 127.0.0.1:58133 2024-12-07T17:23:22,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:23:22,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5d72a750 to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@65a3b3b5 2024-12-07T17:23:22,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7af6bd83, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:23:22,357 DEBUG [hconnection-0x1fb1a32d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:23:22,359 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36192, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:23:22,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:23:22,362 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59444, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:23:22,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x5d72a750 to 127.0.0.1:58133 2024-12-07T17:23:22,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:23:22,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-07T17:23:22,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-07T17:23:22,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=35, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-07T17:23:22,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 35 2024-12-07T17:23:22,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-07T17:23:22,368 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T17:23:22,369 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T17:23:22,373 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T17:23:22,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741901_1077 (size=161) 2024-12-07T17:23:22,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741901_1077 (size=161) 2024-12-07T17:23:22,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741901_1077 (size=161) 2024-12-07T17:23:22,384 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T17:23:22,384 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE; SnapshotRegionProcedure b015e76782accb7dfc05754188145804}, {pid=37, ppid=35, state=RUNNABLE; SnapshotRegionProcedure a1332a7519281371dde400bf41911c38}] 
2024-12-07T17:23:22,385 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE; SnapshotRegionProcedure b015e76782accb7dfc05754188145804 2024-12-07T17:23:22,385 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=37, ppid=35, state=RUNNABLE; SnapshotRegionProcedure a1332a7519281371dde400bf41911c38 2024-12-07T17:23:22,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-07T17:23:22,536 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5c70517e8267,38423,1733592165361 2024-12-07T17:23:22,536 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5c70517e8267,36163,1733592165482 2024-12-07T17:23:22,537 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38423 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=37 2024-12-07T17:23:22,537 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36163 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=36 2024-12-07T17:23:22,538 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1733592201717.a1332a7519281371dde400bf41911c38. 2024-12-07T17:23:22,538 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1733592201717.b015e76782accb7dfc05754188145804. 2024-12-07T17:23:22,538 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.HRegion(2538): Flush status journal for a1332a7519281371dde400bf41911c38: 2024-12-07T17:23:22,538 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1733592201717.a1332a7519281371dde400bf41911c38. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-07T17:23:22,538 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.HRegion(2538): Flush status journal for b015e76782accb7dfc05754188145804: 2024-12-07T17:23:22,538 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1733592201717.b015e76782accb7dfc05754188145804. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-07T17:23:22,539 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1733592201717.a1332a7519281371dde400bf41911c38.' 
region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-07T17:23:22,539 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:23:22,539 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T17:23:22,539 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1733592201717.b015e76782accb7dfc05754188145804.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-07T17:23:22,539 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:23:22,539 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T17:23:22,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741903_1079 (size=68) 2024-12-07T17:23:22,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741903_1079 (size=68) 2024-12-07T17:23:22,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741903_1079 (size=68) 2024-12-07T17:23:22,561 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1733592201717.b015e76782accb7dfc05754188145804. 
2024-12-07T17:23:22,561 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=36 2024-12-07T17:23:22,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=36 2024-12-07T17:23:22,562 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region b015e76782accb7dfc05754188145804 2024-12-07T17:23:22,562 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE; SnapshotRegionProcedure b015e76782accb7dfc05754188145804 2024-12-07T17:23:22,565 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; SnapshotRegionProcedure b015e76782accb7dfc05754188145804 in 180 msec 2024-12-07T17:23:22,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741902_1078 (size=68) 2024-12-07T17:23:22,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741902_1078 (size=68) 2024-12-07T17:23:22,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741902_1078 (size=68) 2024-12-07T17:23:22,571 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1733592201717.a1332a7519281371dde400bf41911c38. 2024-12-07T17:23:22,571 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=37 2024-12-07T17:23:22,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=37 2024-12-07T17:23:22,572 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region a1332a7519281371dde400bf41911c38 2024-12-07T17:23:22,572 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=37, ppid=35, state=RUNNABLE; SnapshotRegionProcedure a1332a7519281371dde400bf41911c38 2024-12-07T17:23:22,575 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=35 2024-12-07T17:23:22,575 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=35, state=SUCCESS; SnapshotRegionProcedure a1332a7519281371dde400bf41911c38 in 189 msec 2024-12-07T17:23:22,575 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T17:23:22,576 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 
execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T17:23:22,577 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T17:23:22,577 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-12-07T17:23:22,579 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-12-07T17:23:22,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741904_1080 (size=543) 2024-12-07T17:23:22,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741904_1080 (size=543) 2024-12-07T17:23:22,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741904_1080 (size=543) 2024-12-07T17:23:22,603 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T17:23:22,611 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T17:23:22,611 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-12-07T17:23:22,613 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T17:23:22,613 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 35 2024-12-07T17:23:22,615 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 249 msec 2024-12-07T17:23:22,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-07T17:23:22,671 INFO [Time-limited test {}] 
client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl, procId: 35 completed 2024-12-07T17:23:22,681 DEBUG [htable-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:23:22,683 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55058, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:23:22,684 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36163 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithResetTtl,,1733592201717.b015e76782accb7dfc05754188145804. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T17:23:22,685 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38423 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithResetTtl,1,1733592201717.a1332a7519281371dde400bf41911c38. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T17:23:22,697 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportWithResetTtl 2024-12-07T17:23:22,698 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportWithResetTtl,,1733592201717.b015e76782accb7dfc05754188145804. 2024-12-07T17:23:22,698 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:23:22,715 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-07T17:23:22,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733592202715 (current time:1733592202715). 
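Between the two snapshots the test loads rows into both regions, and the regionservers log "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." That warning is what HBase emits when a mutation arrives with durability SKIP_WAL; a hedged sketch of such a put (row key, qualifier, and value are illustrative, only the table and family names come from the log):

// Hedged illustration: a put that skips the write-ahead log.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("testtb-testExportWithResetTtl"))) {
      Put put = new Put(Bytes.toBytes("row-0"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // SKIP_WAL trades durability for speed: the edit lives only in the memstore
      // until a flush, hence the "Data may be lost in the event of a crash" warning.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}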
2024-12-07T17:23:22,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T17:23:22,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-07T17:23:22,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T17:23:22,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x60a25a2c to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@729835de 2024-12-07T17:23:22,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b8d488e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:23:22,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:23:22,724 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37256, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:23:22,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x60a25a2c to 127.0.0.1:58133 2024-12-07T17:23:22,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:23:22,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3b61a7ed to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6fb2126f 2024-12-07T17:23:22,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45121645, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:23:22,736 DEBUG [hconnection-0x3acc45e2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:23:22,737 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37272, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:23:22,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:23:22,740 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55064, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:23:22,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 
0x3b61a7ed to 127.0.0.1:58133 2024-12-07T17:23:22,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:23:22,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-07T17:23:22,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-07T17:23:22,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=38, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-07T17:23:22,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 38 2024-12-07T17:23:22,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-07T17:23:22,745 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T17:23:22,746 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T17:23:22,748 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T17:23:22,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741905_1081 (size=156) 2024-12-07T17:23:22,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741905_1081 (size=156) 2024-12-07T17:23:22,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741905_1081 (size=156) 2024-12-07T17:23:22,763 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T17:23:22,763 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE; SnapshotRegionProcedure b015e76782accb7dfc05754188145804}, {pid=40, ppid=38, state=RUNNABLE; SnapshotRegionProcedure a1332a7519281371dde400bf41911c38}] 2024-12-07T17:23:22,764 INFO [PEWorker-3 
{}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=38, state=RUNNABLE; SnapshotRegionProcedure a1332a7519281371dde400bf41911c38 2024-12-07T17:23:22,765 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=39, ppid=38, state=RUNNABLE; SnapshotRegionProcedure b015e76782accb7dfc05754188145804 2024-12-07T17:23:22,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-07T17:23:22,916 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5c70517e8267,38423,1733592165361 2024-12-07T17:23:22,916 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5c70517e8267,36163,1733592165482 2024-12-07T17:23:22,917 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38423 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=40 2024-12-07T17:23:22,917 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1733592201717.a1332a7519281371dde400bf41911c38. 2024-12-07T17:23:22,917 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36163 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=39 2024-12-07T17:23:22,917 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegion(2837): Flushing a1332a7519281371dde400bf41911c38 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-07T17:23:22,918 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1733592201717.b015e76782accb7dfc05754188145804. 
2024-12-07T17:23:22,918 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegion(2837): Flushing b015e76782accb7dfc05754188145804 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-07T17:23:22,937 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithResetTtl/a1332a7519281371dde400bf41911c38/.tmp/cf/caeccdd612984aec8d8892411a0e2690 is 71, key is 1ab9a81c167affb9a53c54c46d01f47c/cf:q/1733592202685/Put/seqid=0 2024-12-07T17:23:22,942 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithResetTtl/b015e76782accb7dfc05754188145804/.tmp/cf/1bbe96cd9d9c4d8a936c5e9b91f64e13 is 71, key is 03941c795a95c9fa5bb01e1a417c30c8/cf:q/1733592202684/Put/seqid=0 2024-12-07T17:23:22,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741906_1082 (size=8256) 2024-12-07T17:23:22,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741906_1082 (size=8256) 2024-12-07T17:23:22,953 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithResetTtl/a1332a7519281371dde400bf41911c38/.tmp/cf/caeccdd612984aec8d8892411a0e2690 2024-12-07T17:23:22,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741906_1082 (size=8256) 2024-12-07T17:23:22,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741907_1083 (size=5354) 2024-12-07T17:23:22,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741907_1083 (size=5354) 2024-12-07T17:23:22,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741907_1083 (size=5354) 2024-12-07T17:23:22,962 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithResetTtl/b015e76782accb7dfc05754188145804/.tmp/cf/1bbe96cd9d9c4d8a936c5e9b91f64e13 2024-12-07T17:23:22,963 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithResetTtl/a1332a7519281371dde400bf41911c38/.tmp/cf/caeccdd612984aec8d8892411a0e2690 as 
hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithResetTtl/a1332a7519281371dde400bf41911c38/cf/caeccdd612984aec8d8892411a0e2690 2024-12-07T17:23:22,970 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithResetTtl/a1332a7519281371dde400bf41911c38/cf/caeccdd612984aec8d8892411a0e2690, entries=46, sequenceid=6, filesize=8.1 K 2024-12-07T17:23:22,971 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegion(3040): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for a1332a7519281371dde400bf41911c38 in 54ms, sequenceid=6, compaction requested=false 2024-12-07T17:23:22,971 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-12-07T17:23:22,972 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegion(2538): Flush status journal for a1332a7519281371dde400bf41911c38: 2024-12-07T17:23:22,972 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1733592201717.a1332a7519281371dde400bf41911c38. for snaptb0-testExportWithResetTtl completed. 2024-12-07T17:23:22,972 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1733592201717.a1332a7519281371dde400bf41911c38.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-07T17:23:22,972 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:23:22,972 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithResetTtl/a1332a7519281371dde400bf41911c38/cf/caeccdd612984aec8d8892411a0e2690] hfiles 2024-12-07T17:23:22,972 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithResetTtl/a1332a7519281371dde400bf41911c38/cf/caeccdd612984aec8d8892411a0e2690 for snapshot=snaptb0-testExportWithResetTtl 2024-12-07T17:23:22,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741908_1084 (size=107) 2024-12-07T17:23:22,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741908_1084 (size=107) 2024-12-07T17:23:22,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741908_1084 (size=107) 2024-12-07T17:23:22,992 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1733592201717.a1332a7519281371dde400bf41911c38. 
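As the records for region a1332a7519281371dde400bf41911c38 show, a FLUSH-type snapshot first flushes the memstore to a new hfile and then records that hfile in the snapshot manifest. The flush step is the same operation a client can force explicitly; a minimal sketch, assuming the usual connection setup:

// Hedged illustration: forcing the memstore flush that the snapshot performs per region.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Flushes every region of the table, producing one new hfile per store
      // (e.g. cf/caeccdd612984aec8d8892411a0e2690 above) that a snapshot can reference.
      admin.flush(TableName.valueOf("testtb-testExportWithResetTtl"));
    }
  }
}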
2024-12-07T17:23:22,992 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=40 2024-12-07T17:23:22,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=40 2024-12-07T17:23:22,993 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region a1332a7519281371dde400bf41911c38 2024-12-07T17:23:22,993 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=38, state=RUNNABLE; SnapshotRegionProcedure a1332a7519281371dde400bf41911c38 2024-12-07T17:23:22,995 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithResetTtl/b015e76782accb7dfc05754188145804/.tmp/cf/1bbe96cd9d9c4d8a936c5e9b91f64e13 as hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithResetTtl/b015e76782accb7dfc05754188145804/cf/1bbe96cd9d9c4d8a936c5e9b91f64e13 2024-12-07T17:23:22,996 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=38, state=SUCCESS; SnapshotRegionProcedure a1332a7519281371dde400bf41911c38 in 231 msec 2024-12-07T17:23:23,002 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithResetTtl/b015e76782accb7dfc05754188145804/cf/1bbe96cd9d9c4d8a936c5e9b91f64e13, entries=4, sequenceid=6, filesize=5.2 K 2024-12-07T17:23:23,003 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegion(3040): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for b015e76782accb7dfc05754188145804 in 85ms, sequenceid=6, compaction requested=false 2024-12-07T17:23:23,004 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegion(2538): Flush status journal for b015e76782accb7dfc05754188145804: 2024-12-07T17:23:23,004 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1733592201717.b015e76782accb7dfc05754188145804. for snaptb0-testExportWithResetTtl completed. 2024-12-07T17:23:23,004 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1733592201717.b015e76782accb7dfc05754188145804.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-07T17:23:23,004 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:23:23,004 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithResetTtl/b015e76782accb7dfc05754188145804/cf/1bbe96cd9d9c4d8a936c5e9b91f64e13] hfiles 2024-12-07T17:23:23,004 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithResetTtl/b015e76782accb7dfc05754188145804/cf/1bbe96cd9d9c4d8a936c5e9b91f64e13 for snapshot=snaptb0-testExportWithResetTtl 2024-12-07T17:23:23,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741909_1085 (size=107) 2024-12-07T17:23:23,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741909_1085 (size=107) 2024-12-07T17:23:23,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741909_1085 (size=107) 2024-12-07T17:23:23,023 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1733592201717.b015e76782accb7dfc05754188145804. 
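Once pid=38 finishes (a few records below), both emptySnaptb0-testExportWithResetTtl and snaptb0-testExportWithResetTtl exist on the cluster. A hedged sketch of confirming that from a client; the printing is illustrative:

// Hedged illustration: listing completed snapshots.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Expected to include both snapshots taken above once their procedures complete.
      for (SnapshotDescription sd : admin.listSnapshots()) {
        System.out.println(sd.getName());
      }
    }
  }
}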
2024-12-07T17:23:23,023 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=39 2024-12-07T17:23:23,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=39 2024-12-07T17:23:23,023 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region b015e76782accb7dfc05754188145804 2024-12-07T17:23:23,024 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=39, ppid=38, state=RUNNABLE; SnapshotRegionProcedure b015e76782accb7dfc05754188145804 2024-12-07T17:23:23,029 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38 2024-12-07T17:23:23,029 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; SnapshotRegionProcedure b015e76782accb7dfc05754188145804 in 262 msec 2024-12-07T17:23:23,029 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T17:23:23,030 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T17:23:23,031 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T17:23:23,031 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-12-07T17:23:23,032 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-12-07T17:23:23,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-07T17:23:23,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741910_1086 (size=621) 2024-12-07T17:23:23,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741910_1086 (size=621) 2024-12-07T17:23:23,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741910_1086 (size=621) 2024-12-07T17:23:23,060 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl 
type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T17:23:23,069 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T17:23:23,069 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-12-07T17:23:23,071 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T17:23:23,071 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 38 2024-12-07T17:23:23,072 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 329 msec 2024-12-07T17:23:23,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-07T17:23:23,349 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl, procId: 38 completed 2024-12-07T17:23:23,351 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T17:23:23,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testExportWithResetTtl 2024-12-07T17:23:23,353 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T17:23:23,353 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:23:23,353 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportWithResetTtl" procId is: 41 2024-12-07T17:23:23,354 INFO [PEWorker-3 {}] 
procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T17:23:23,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-07T17:23:23,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741911_1087 (size=397) 2024-12-07T17:23:23,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741911_1087 (size=397) 2024-12-07T17:23:23,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741911_1087 (size=397) 2024-12-07T17:23:23,366 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 69368cd96183d60dedbf4e544ddc6bec, NAME => 'testExportWithResetTtl,,1733592203350.69368cd96183d60dedbf4e544ddc6bec.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:23:23,367 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 07546b523f5cec3721a2f1e7b7bbbe84, NAME => 'testExportWithResetTtl,1,1733592203350.07546b523f5cec3721a2f1e7b7bbbe84.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:23:23,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741912_1088 (size=58) 2024-12-07T17:23:23,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741912_1088 (size=58) 2024-12-07T17:23:23,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741912_1088 (size=58) 2024-12-07T17:23:23,381 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,1,1733592203350.07546b523f5cec3721a2f1e7b7bbbe84.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:23:23,381 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1681): Closing 
07546b523f5cec3721a2f1e7b7bbbe84, disabling compactions & flushes 2024-12-07T17:23:23,381 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,1,1733592203350.07546b523f5cec3721a2f1e7b7bbbe84. 2024-12-07T17:23:23,381 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,1,1733592203350.07546b523f5cec3721a2f1e7b7bbbe84. 2024-12-07T17:23:23,381 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,1,1733592203350.07546b523f5cec3721a2f1e7b7bbbe84. after waiting 0 ms 2024-12-07T17:23:23,381 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,1,1733592203350.07546b523f5cec3721a2f1e7b7bbbe84. 2024-12-07T17:23:23,381 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1922): Closed testExportWithResetTtl,1,1733592203350.07546b523f5cec3721a2f1e7b7bbbe84. 2024-12-07T17:23:23,381 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1635): Region close journal for 07546b523f5cec3721a2f1e7b7bbbe84: 2024-12-07T17:23:23,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741913_1089 (size=58) 2024-12-07T17:23:23,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741913_1089 (size=58) 2024-12-07T17:23:23,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741913_1089 (size=58) 2024-12-07T17:23:23,391 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,,1733592203350.69368cd96183d60dedbf4e544ddc6bec.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:23:23,392 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1681): Closing 69368cd96183d60dedbf4e544ddc6bec, disabling compactions & flushes 2024-12-07T17:23:23,392 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,,1733592203350.69368cd96183d60dedbf4e544ddc6bec. 2024-12-07T17:23:23,392 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,,1733592203350.69368cd96183d60dedbf4e544ddc6bec. 2024-12-07T17:23:23,392 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,,1733592203350.69368cd96183d60dedbf4e544ddc6bec. after waiting 0 ms 2024-12-07T17:23:23,392 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,,1733592203350.69368cd96183d60dedbf4e544ddc6bec. 2024-12-07T17:23:23,392 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1922): Closed testExportWithResetTtl,,1733592203350.69368cd96183d60dedbf4e544ddc6bec. 
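The CreateTableProcedure above builds 'testExportWithResetTtl' with a single 'cf' family and two regions split at row key '1' (STARTKEY ''/ENDKEY '1' and STARTKEY '1'/ENDKEY ''). A hedged sketch of the equivalent client-side creation, using defaults for the family attributes rather than restating every option from the descriptor in the log:

// Hedged illustration: creating the same table layout through the 2.x client API.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testExportWithResetTtl"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
          .build();
      // One split key => two regions: ['', '1') and ['1', ''), as in the records above.
      admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
    }
  }
}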
2024-12-07T17:23:23,392 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1635): Region close journal for 69368cd96183d60dedbf4e544ddc6bec: 2024-12-07T17:23:23,393 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T17:23:23,394 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1733592203350.07546b523f5cec3721a2f1e7b7bbbe84.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1733592203393"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733592203393"}]},"ts":"1733592203393"} 2024-12-07T17:23:23,394 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1733592203350.69368cd96183d60dedbf4e544ddc6bec.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1733592203393"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733592203393"}]},"ts":"1733592203393"} 2024-12-07T17:23:23,397 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-07T17:23:23,398 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T17:23:23,398 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592203398"}]},"ts":"1733592203398"} 2024-12-07T17:23:23,400 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-07T17:23:23,404 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(202): Hosts are {5c70517e8267=0} racks are {/default-rack=0} 2024-12-07T17:23:23,405 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-07T17:23:23,405 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-07T17:23:23,405 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-07T17:23:23,405 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-07T17:23:23,405 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-07T17:23:23,405 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-07T17:23:23,405 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T17:23:23,405 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=69368cd96183d60dedbf4e544ddc6bec, ASSIGN}, {pid=43, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=07546b523f5cec3721a2f1e7b7bbbe84, ASSIGN}] 2024-12-07T17:23:23,407 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=43, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=07546b523f5cec3721a2f1e7b7bbbe84, ASSIGN 2024-12-07T17:23:23,407 INFO 
[PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=69368cd96183d60dedbf4e544ddc6bec, ASSIGN 2024-12-07T17:23:23,408 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=43, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=07546b523f5cec3721a2f1e7b7bbbe84, ASSIGN; state=OFFLINE, location=5c70517e8267,36163,1733592165482; forceNewPlan=false, retain=false 2024-12-07T17:23:23,408 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=69368cd96183d60dedbf4e544ddc6bec, ASSIGN; state=OFFLINE, location=5c70517e8267,38423,1733592165361; forceNewPlan=false, retain=false 2024-12-07T17:23:23,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-07T17:23:23,558 INFO [5c70517e8267:36895 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-07T17:23:23,558 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=43 updating hbase:meta row=07546b523f5cec3721a2f1e7b7bbbe84, regionState=OPENING, regionLocation=5c70517e8267,36163,1733592165482 2024-12-07T17:23:23,558 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=69368cd96183d60dedbf4e544ddc6bec, regionState=OPENING, regionLocation=5c70517e8267,38423,1733592165361 2024-12-07T17:23:23,561 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE; OpenRegionProcedure 07546b523f5cec3721a2f1e7b7bbbe84, server=5c70517e8267,36163,1733592165482}] 2024-12-07T17:23:23,562 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=45, ppid=42, state=RUNNABLE; OpenRegionProcedure 69368cd96183d60dedbf4e544ddc6bec, server=5c70517e8267,38423,1733592165361}] 2024-12-07T17:23:23,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-07T17:23:23,714 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5c70517e8267,36163,1733592165482 2024-12-07T17:23:23,714 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5c70517e8267,38423,1733592165361 2024-12-07T17:23:23,718 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] handler.AssignRegionHandler(135): Open testExportWithResetTtl,,1733592203350.69368cd96183d60dedbf4e544ddc6bec. 2024-12-07T17:23:23,719 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7285): Opening region: {ENCODED => 69368cd96183d60dedbf4e544ddc6bec, NAME => 'testExportWithResetTtl,,1733592203350.69368cd96183d60dedbf4e544ddc6bec.', STARTKEY => '', ENDKEY => '1'} 2024-12-07T17:23:23,719 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(135): Open testExportWithResetTtl,1,1733592203350.07546b523f5cec3721a2f1e7b7bbbe84. 
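While the OpenRegionProcedures above bring the new regions online, a client typically polls for table availability before writing. A minimal sketch, with the polling interval chosen arbitrarily for illustration:

// Hedged illustration: waiting until both regions of the new table report OPEN.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class WaitForTableSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("testExportWithResetTtl");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      while (!admin.isTableAvailable(tn)) {
        Thread.sleep(100); // poll until assignment finishes
      }
      System.out.println(tn + " is online");
    }
  }
}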
2024-12-07T17:23:23,719 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7285): Opening region: {ENCODED => 07546b523f5cec3721a2f1e7b7bbbe84, NAME => 'testExportWithResetTtl,1,1733592203350.07546b523f5cec3721a2f1e7b7bbbe84.', STARTKEY => '1', ENDKEY => ''} 2024-12-07T17:23:23,719 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportWithResetTtl,,1733592203350.69368cd96183d60dedbf4e544ddc6bec. service=AccessControlService 2024-12-07T17:23:23,719 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportWithResetTtl,1,1733592203350.07546b523f5cec3721a2f1e7b7bbbe84. service=AccessControlService 2024-12-07T17:23:23,719 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T17:23:23,720 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T17:23:23,720 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 69368cd96183d60dedbf4e544ddc6bec 2024-12-07T17:23:23,720 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 07546b523f5cec3721a2f1e7b7bbbe84 2024-12-07T17:23:23,720 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,,1733592203350.69368cd96183d60dedbf4e544ddc6bec.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:23:23,720 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,1,1733592203350.07546b523f5cec3721a2f1e7b7bbbe84.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:23:23,720 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7327): checking encryption for 69368cd96183d60dedbf4e544ddc6bec 2024-12-07T17:23:23,720 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7327): checking encryption for 07546b523f5cec3721a2f1e7b7bbbe84 2024-12-07T17:23:23,720 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7330): checking classloading for 69368cd96183d60dedbf4e544ddc6bec 2024-12-07T17:23:23,720 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7330): checking classloading for 07546b523f5cec3721a2f1e7b7bbbe84 2024-12-07T17:23:23,726 INFO [StoreOpener-07546b523f5cec3721a2f1e7b7bbbe84-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 07546b523f5cec3721a2f1e7b7bbbe84 2024-12-07T17:23:23,726 INFO [StoreOpener-69368cd96183d60dedbf4e544ddc6bec-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 69368cd96183d60dedbf4e544ddc6bec 2024-12-07T17:23:23,728 INFO [StoreOpener-07546b523f5cec3721a2f1e7b7bbbe84-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 07546b523f5cec3721a2f1e7b7bbbe84 columnFamilyName cf 2024-12-07T17:23:23,728 DEBUG [StoreOpener-07546b523f5cec3721a2f1e7b7bbbe84-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:23:23,728 INFO [StoreOpener-69368cd96183d60dedbf4e544ddc6bec-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 69368cd96183d60dedbf4e544ddc6bec columnFamilyName cf 2024-12-07T17:23:23,729 DEBUG [StoreOpener-69368cd96183d60dedbf4e544ddc6bec-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:23:23,729 INFO [StoreOpener-07546b523f5cec3721a2f1e7b7bbbe84-1 {}] regionserver.HStore(327): Store=07546b523f5cec3721a2f1e7b7bbbe84/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:23:23,729 INFO [StoreOpener-69368cd96183d60dedbf4e544ddc6bec-1 {}] regionserver.HStore(327): Store=69368cd96183d60dedbf4e544ddc6bec/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:23:23,730 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportWithResetTtl/69368cd96183d60dedbf4e544ddc6bec 2024-12-07T17:23:23,730 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportWithResetTtl/07546b523f5cec3721a2f1e7b7bbbe84 2024-12-07T17:23:23,731 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportWithResetTtl/69368cd96183d60dedbf4e544ddc6bec 2024-12-07T17:23:23,731 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportWithResetTtl/07546b523f5cec3721a2f1e7b7bbbe84 2024-12-07T17:23:23,734 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1085): writing seq id for 69368cd96183d60dedbf4e544ddc6bec 2024-12-07T17:23:23,735 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1085): writing seq id for 07546b523f5cec3721a2f1e7b7bbbe84 2024-12-07T17:23:23,738 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportWithResetTtl/07546b523f5cec3721a2f1e7b7bbbe84/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T17:23:23,738 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportWithResetTtl/69368cd96183d60dedbf4e544ddc6bec/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T17:23:23,739 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1102): Opened 07546b523f5cec3721a2f1e7b7bbbe84; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62679082, jitterRate=-0.06600889563560486}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T17:23:23,740 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1102): Opened 69368cd96183d60dedbf4e544ddc6bec; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62710463, jitterRate=-0.06554128229618073}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T17:23:23,740 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1001): Region open journal for 69368cd96183d60dedbf4e544ddc6bec: 2024-12-07T17:23:23,740 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1001): Region open journal for 07546b523f5cec3721a2f1e7b7bbbe84: 2024-12-07T17:23:23,741 INFO 
[RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportWithResetTtl,,1733592203350.69368cd96183d60dedbf4e544ddc6bec., pid=45, masterSystemTime=1733592203714 2024-12-07T17:23:23,742 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportWithResetTtl,1,1733592203350.07546b523f5cec3721a2f1e7b7bbbe84., pid=44, masterSystemTime=1733592203714 2024-12-07T17:23:23,745 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportWithResetTtl,,1733592203350.69368cd96183d60dedbf4e544ddc6bec. 2024-12-07T17:23:23,745 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] handler.AssignRegionHandler(164): Opened testExportWithResetTtl,,1733592203350.69368cd96183d60dedbf4e544ddc6bec. 2024-12-07T17:23:23,745 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=69368cd96183d60dedbf4e544ddc6bec, regionState=OPEN, openSeqNum=2, regionLocation=5c70517e8267,38423,1733592165361 2024-12-07T17:23:23,746 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportWithResetTtl,1,1733592203350.07546b523f5cec3721a2f1e7b7bbbe84. 2024-12-07T17:23:23,746 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(164): Opened testExportWithResetTtl,1,1733592203350.07546b523f5cec3721a2f1e7b7bbbe84. 2024-12-07T17:23:23,746 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=43 updating hbase:meta row=07546b523f5cec3721a2f1e7b7bbbe84, regionState=OPEN, openSeqNum=2, regionLocation=5c70517e8267,36163,1733592165482 2024-12-07T17:23:23,752 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=45, resume processing ppid=42 2024-12-07T17:23:23,752 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, ppid=42, state=SUCCESS; OpenRegionProcedure 69368cd96183d60dedbf4e544ddc6bec, server=5c70517e8267,38423,1733592165361 in 186 msec 2024-12-07T17:23:23,752 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-12-07T17:23:23,754 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=69368cd96183d60dedbf4e544ddc6bec, ASSIGN in 347 msec 2024-12-07T17:23:23,755 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; OpenRegionProcedure 07546b523f5cec3721a2f1e7b7bbbe84, server=5c70517e8267,36163,1733592165482 in 189 msec 2024-12-07T17:23:23,758 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=41 2024-12-07T17:23:23,758 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=07546b523f5cec3721a2f1e7b7bbbe84, ASSIGN in 349 msec 2024-12-07T17:23:23,759 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 
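[annotation] The TransitRegionStateProcedure/AssignRegionHandler records above open the two regions of testExportWithResetTtl (key ranges [, '1') and ['1', )), each with a single 'cf' store and the AccessController coprocessor, as subprocedures of CreateTableProcedure pid=41. For orientation, a minimal client-side sketch of creating such a pre-split table; the class name CreateExportTable, the builder calls, and the split key are illustrative assumptions inferred from the region names in the log, not taken from the test source.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateExportTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName name = TableName.valueOf("testExportWithResetTtl");
      // One column family 'cf', matching the stores opened in the log above.
      TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(name)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"));
      // A single split key "1" yields the two regions seen in the log: [, 1) and [1, ).
      byte[][] splits = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(td.build(), splits);
    }
  }
}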
2024-12-07T17:23:23,760 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592203760"}]},"ts":"1733592203760"} 2024-12-07T17:23:23,762 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-07T17:23:23,766 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T17:23:23,767 DEBUG [PEWorker-3 {}] access.PermissionStorage(175): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-12-07T17:23:23,770 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36163 {}] access.PermissionStorage(611): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-07T17:23:23,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:23:23,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:23:23,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:23:23,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:23:23,775 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-07T17:23:23,775 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-07T17:23:23,777 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-07T17:23:23,777 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-07T17:23:23,778 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-07T17:23:23,778 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-07T17:23:23,779 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-07T17:23:23,779 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-07T17:23:23,779 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, state=SUCCESS; CreateTableProcedure table=testExportWithResetTtl in 425 msec 2024-12-07T17:23:23,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-07T17:23:23,963 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testExportWithResetTtl, procId: 41 completed 2024-12-07T17:23:23,964 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testExportWithResetTtl get assigned. Timeout = 60000ms 2024-12-07T17:23:23,964 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:23:23,972 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testExportWithResetTtl assigned to meta. Checking AM states. 2024-12-07T17:23:23,972 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:23:23,972 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testExportWithResetTtl assigned. 2024-12-07T17:23:24,005 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38423 {}] regionserver.HRegion(8254): writing data to region testExportWithResetTtl,,1733592203350.69368cd96183d60dedbf4e544ddc6bec. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T17:23:24,006 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36163 {}] regionserver.HRegion(8254): writing data to region testExportWithResetTtl,1,1733592203350.07546b523f5cec3721a2f1e7b7bbbe84. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T17:23:24,011 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testExportWithResetTtl 2024-12-07T17:23:24,011 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testExportWithResetTtl,,1733592203350.69368cd96183d60dedbf4e544ddc6bec. 2024-12-07T17:23:24,011 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:23:24,027 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-07T17:23:24,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733592204028 (current time:1733592204028). 
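[annotation] The records above show the test writing rows with the WAL disabled ("Data may be lost in the event of a crash") and then the master receiving a snapshot request { ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }. A hedged client-side sketch of both steps follows; the row key and value are placeholders, the class name LoadAndSnapshot is mine, and the snapshot TTL (which the test supplies through snapshot properties) is deliberately not reproduced here to avoid guessing at that API.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class LoadAndSnapshot {
  public static void main(String[] args) throws Exception {
    TableName name = TableName.valueOf("testExportWithResetTtl");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(name);
         Admin admin = conn.getAdmin()) {
      // SKIP_WAL is what produces the "with WAL disabled" warnings in the log;
      // row and value here are placeholders, not the test's actual data.
      Put put = new Put(Bytes.toBytes("row-000"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);

      // Takes a flush snapshot of the enabled table, matching type=FLUSH in the
      // logged request; the ttl=100000 property from the log is not set here.
      admin.snapshot("snaptb-testExportWithResetTtl", name);
    }
  }
}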
2024-12-07T17:23:24,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-07T17:23:24,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T17:23:24,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x198a3bd0 to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1c94cd9 2024-12-07T17:23:24,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60719c99, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:23:24,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:23:24,037 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37276, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:23:24,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x198a3bd0 to 127.0.0.1:58133 2024-12-07T17:23:24,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:23:24,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4c0fe486 to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7ebd65a 2024-12-07T17:23:24,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b12076a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:23:24,052 DEBUG [hconnection-0x77c6b104-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:23:24,054 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37278, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:23:24,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:23:24,058 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55066, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:23:24,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4c0fe486 to 127.0.0.1:58133 2024-12-07T17:23:24,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:23:24,060 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] access.PermissionStorage(611): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-07T17:23:24,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-07T17:23:24,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=46, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-07T17:23:24,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 46 2024-12-07T17:23:24,065 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-12-07T17:23:24,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-07T17:23:24,066 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T17:23:24,070 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T17:23:24,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741914_1090 (size=143) 2024-12-07T17:23:24,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741914_1090 (size=143) 2024-12-07T17:23:24,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741914_1090 (size=143) 2024-12-07T17:23:24,095 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T17:23:24,096 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=47, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 69368cd96183d60dedbf4e544ddc6bec}, {pid=48, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 07546b523f5cec3721a2f1e7b7bbbe84}] 2024-12-07T17:23:24,097 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=47, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 69368cd96183d60dedbf4e544ddc6bec 2024-12-07T17:23:24,097 INFO [PEWorker-1 {}] 
procedure.MasterProcedureScheduler(786): Took xlock for pid=48, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 07546b523f5cec3721a2f1e7b7bbbe84 2024-12-07T17:23:24,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-07T17:23:24,248 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5c70517e8267,38423,1733592165361 2024-12-07T17:23:24,248 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5c70517e8267,36163,1733592165482 2024-12-07T17:23:24,249 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38423 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=47 2024-12-07T17:23:24,249 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36163 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=48 2024-12-07T17:23:24,249 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1733592203350.69368cd96183d60dedbf4e544ddc6bec. 2024-12-07T17:23:24,249 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegion(2837): Flushing 69368cd96183d60dedbf4e544ddc6bec 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-07T17:23:24,250 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1733592203350.07546b523f5cec3721a2f1e7b7bbbe84. 
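[annotation] The SnapshotRegionCallable records above and below flush each region's memstore into a new HFile (333 B and ~2.93 KB respectively) before adding file references to the snapshot manifest. Those flushes are driven by the FLUSH-type snapshot itself; the closest client-side equivalent, shown purely for illustration under that assumption, is an explicit Admin.flush of the table.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Forces every memstore of the table to be written out as HFiles,
      // the same effect the snapshot's per-region flush has in the log above.
      admin.flush(TableName.valueOf("testExportWithResetTtl"));
    }
  }
}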
2024-12-07T17:23:24,251 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing 07546b523f5cec3721a2f1e7b7bbbe84 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-07T17:23:24,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportWithResetTtl/07546b523f5cec3721a2f1e7b7bbbe84/.tmp/cf/45b5a6447ae945d48d2b87d913fb3bae is 71, key is 12c5e9deb6508edf2721f7e9f064668f/cf:q/1733592204005/Put/seqid=0 2024-12-07T17:23:24,279 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportWithResetTtl/69368cd96183d60dedbf4e544ddc6bec/.tmp/cf/05382327c7704055b889b7ede1f8eb1f is 71, key is 009e31079233916007eab30a70400fc7/cf:q/1733592204005/Put/seqid=0 2024-12-07T17:23:24,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741915_1091 (size=8188) 2024-12-07T17:23:24,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741915_1091 (size=8188) 2024-12-07T17:23:24,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741915_1091 (size=8188) 2024-12-07T17:23:24,308 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportWithResetTtl/07546b523f5cec3721a2f1e7b7bbbe84/.tmp/cf/45b5a6447ae945d48d2b87d913fb3bae 2024-12-07T17:23:24,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741916_1092 (size=5422) 2024-12-07T17:23:24,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741916_1092 (size=5422) 2024-12-07T17:23:24,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741916_1092 (size=5422) 2024-12-07T17:23:24,313 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportWithResetTtl/69368cd96183d60dedbf4e544ddc6bec/.tmp/cf/05382327c7704055b889b7ede1f8eb1f 2024-12-07T17:23:24,317 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportWithResetTtl/07546b523f5cec3721a2f1e7b7bbbe84/.tmp/cf/45b5a6447ae945d48d2b87d913fb3bae as 
hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportWithResetTtl/07546b523f5cec3721a2f1e7b7bbbe84/cf/45b5a6447ae945d48d2b87d913fb3bae 2024-12-07T17:23:24,321 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportWithResetTtl/69368cd96183d60dedbf4e544ddc6bec/.tmp/cf/05382327c7704055b889b7ede1f8eb1f as hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportWithResetTtl/69368cd96183d60dedbf4e544ddc6bec/cf/05382327c7704055b889b7ede1f8eb1f 2024-12-07T17:23:24,325 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportWithResetTtl/07546b523f5cec3721a2f1e7b7bbbe84/cf/45b5a6447ae945d48d2b87d913fb3bae, entries=45, sequenceid=5, filesize=8.0 K 2024-12-07T17:23:24,326 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 07546b523f5cec3721a2f1e7b7bbbe84 in 76ms, sequenceid=5, compaction requested=false 2024-12-07T17:23:24,326 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-12-07T17:23:24,327 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for 07546b523f5cec3721a2f1e7b7bbbe84: 2024-12-07T17:23:24,327 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1733592203350.07546b523f5cec3721a2f1e7b7bbbe84. for snaptb-testExportWithResetTtl completed. 2024-12-07T17:23:24,328 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1733592203350.07546b523f5cec3721a2f1e7b7bbbe84.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-07T17:23:24,328 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:23:24,328 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportWithResetTtl/07546b523f5cec3721a2f1e7b7bbbe84/cf/45b5a6447ae945d48d2b87d913fb3bae] hfiles 2024-12-07T17:23:24,328 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportWithResetTtl/07546b523f5cec3721a2f1e7b7bbbe84/cf/45b5a6447ae945d48d2b87d913fb3bae for snapshot=snaptb-testExportWithResetTtl 2024-12-07T17:23:24,330 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportWithResetTtl/69368cd96183d60dedbf4e544ddc6bec/cf/05382327c7704055b889b7ede1f8eb1f, entries=5, sequenceid=5, filesize=5.3 K 2024-12-07T17:23:24,332 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegion(3040): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for 69368cd96183d60dedbf4e544ddc6bec in 83ms, sequenceid=5, compaction requested=false 2024-12-07T17:23:24,332 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegion(2538): Flush status journal for 69368cd96183d60dedbf4e544ddc6bec: 2024-12-07T17:23:24,332 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1733592203350.69368cd96183d60dedbf4e544ddc6bec. for snaptb-testExportWithResetTtl completed. 2024-12-07T17:23:24,332 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1733592203350.69368cd96183d60dedbf4e544ddc6bec.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-07T17:23:24,332 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:23:24,332 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportWithResetTtl/69368cd96183d60dedbf4e544ddc6bec/cf/05382327c7704055b889b7ede1f8eb1f] hfiles 2024-12-07T17:23:24,332 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportWithResetTtl/69368cd96183d60dedbf4e544ddc6bec/cf/05382327c7704055b889b7ede1f8eb1f for snapshot=snaptb-testExportWithResetTtl 2024-12-07T17:23:24,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741917_1093 (size=100) 2024-12-07T17:23:24,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741917_1093 (size=100) 2024-12-07T17:23:24,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741917_1093 (size=100) 2024-12-07T17:23:24,348 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1733592203350.07546b523f5cec3721a2f1e7b7bbbe84. 
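[annotation] As the region callables report back (pid=48 above, pid=47 below), the parent SnapshotProcedure pid=46 walks its remaining states (SPLIT_REGIONS, MOB_REGION, CONSOLIDATE, VERIFIER, COMPLETE) and moves the manifest out of .hbase-snapshot/.tmp. After that, the snapshot is visible to clients; a small illustrative sketch, assuming the standard Admin.listSnapshots API and with a class name of my own choosing.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshots {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Once SNAPSHOT_COMPLETE_SNAPSHOT has moved the manifest out of .tmp,
      // snaptb-testExportWithResetTtl should appear in this listing.
      for (SnapshotDescription sd : admin.listSnapshots()) {
        System.out.println(sd.getName() + " on " + sd.getTableName());
      }
    }
  }
}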
2024-12-07T17:23:24,348 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-12-07T17:23:24,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-12-07T17:23:24,349 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 07546b523f5cec3721a2f1e7b7bbbe84 2024-12-07T17:23:24,349 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=48, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 07546b523f5cec3721a2f1e7b7bbbe84 2024-12-07T17:23:24,352 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=46, state=SUCCESS; SnapshotRegionProcedure 07546b523f5cec3721a2f1e7b7bbbe84 in 254 msec 2024-12-07T17:23:24,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741918_1094 (size=100) 2024-12-07T17:23:24,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741918_1094 (size=100) 2024-12-07T17:23:24,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741918_1094 (size=100) 2024-12-07T17:23:24,366 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1733592203350.69368cd96183d60dedbf4e544ddc6bec. 2024-12-07T17:23:24,366 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=47 2024-12-07T17:23:24,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=47 2024-12-07T17:23:24,367 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 69368cd96183d60dedbf4e544ddc6bec 2024-12-07T17:23:24,367 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=47, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 69368cd96183d60dedbf4e544ddc6bec 2024-12-07T17:23:24,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-07T17:23:24,370 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=47, resume processing ppid=46 2024-12-07T17:23:24,370 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, ppid=46, state=SUCCESS; SnapshotRegionProcedure 69368cd96183d60dedbf4e544ddc6bec in 272 msec 2024-12-07T17:23:24,370 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T17:23:24,370 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T17:23:24,371 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T17:23:24,371 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-12-07T17:23:24,372 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-07T17:23:24,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741919_1095 (size=600) 2024-12-07T17:23:24,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741919_1095 (size=600) 2024-12-07T17:23:24,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741919_1095 (size=600) 2024-12-07T17:23:24,388 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T17:23:24,396 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T17:23:24,396 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-07T17:23:24,398 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T17:23:24,398 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 46 2024-12-07T17:23:24,399 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 337 msec 2024-12-07T17:23:24,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to 
see if procedure is done pid=46 2024-12-07T17:23:24,669 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testExportWithResetTtl, procId: 46 completed 2024-12-07T17:23:24,687 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592204687 2024-12-07T17:23:24,687 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:33765, tgtDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592204687, rawTgtDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592204687, srcFsUri=hdfs://localhost:33765, srcDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:23:24,742 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:33765, inputRoot=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:23:24,742 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1614647307_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592204687, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592204687/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-07T17:23:24,744 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 
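[annotation] The ExportSnapshot records above (inputRoot, outputRoot, skipTmp, and the manifest copy into .hbase-snapshot/.tmp) correspond to the arguments of the org.apache.hadoop.hbase.snapshot.ExportSnapshot tool. A hedged sketch of driving it through ToolRunner, reusing the destination path printed in the log; the class name RunExport and the omission of optional flags such as -mappers or -overwrite are my own choices, and I assume ExportSnapshot's usual Tool entry point rather than the test's exact invocation.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class RunExport {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copies the snapshot manifest and the referenced HFiles to the target
    // filesystem, roughly equivalent to:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //     -snapshot snaptb-testExportWithResetTtl -copy-to <dest>
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb-testExportWithResetTtl",
        "-copy-to",
        "hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592204687"
    });
    System.exit(rc);
  }
}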
2024-12-07T17:23:24,756 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592204687/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-07T17:23:24,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741920_1096 (size=143) 2024-12-07T17:23:24,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741920_1096 (size=143) 2024-12-07T17:23:24,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741920_1096 (size=143) 2024-12-07T17:23:24,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741921_1097 (size=600) 2024-12-07T17:23:24,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741921_1097 (size=600) 2024-12-07T17:23:24,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741921_1097 (size=600) 2024-12-07T17:23:24,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741922_1098 (size=141) 2024-12-07T17:23:24,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741922_1098 (size=141) 2024-12-07T17:23:24,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741922_1098 (size=141) 2024-12-07T17:23:24,836 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-07T17:23:24,837 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-07T17:23:24,837 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-07T17:23:24,837 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-07T17:23:24,860 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-07T17:23:24,860 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 
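[annotation] The long run of TableMapReduceUtil(923) records above and below lists, for each class the export MapReduce job touches, the jar that will be shipped with the job. That is the behavior of TableMapReduceUtil.addDependencyJars; a minimal sketch under that assumption (the job name and class name here are illustrative only).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class ShipHBaseJars {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "export-snapshot-deps-demo");
    // Resolves the jar backing each HBase/Hadoop class the job needs and adds it
    // to the job's classpath, producing "For class ..., using jar ..." debug lines
    // like the ones in this log.
    TableMapReduceUtil.addDependencyJars(job);
  }
}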
2024-12-07T17:23:24,861 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-07T17:23:24,861 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-07T17:23:24,862 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-07T17:23:25,615 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_3/usercache/jenkins/appcache/application_1733592173099_0001/container_1733592173099_0001_01_000001/launch_container.sh] 2024-12-07T17:23:25,616 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_3/usercache/jenkins/appcache/application_1733592173099_0001/container_1733592173099_0001_01_000001/container_tokens] 2024-12-07T17:23:25,616 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_3/usercache/jenkins/appcache/application_1733592173099_0001/container_1733592173099_0001_01_000001/sysfs] 2024-12-07T17:23:25,621 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0001_000001 (auth:SIMPLE) from 127.0.0.1:35626 2024-12-07T17:23:26,034 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop-12510553692295787569.jar 2024-12-07T17:23:26,035 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-07T17:23:26,035 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-07T17:23:26,109 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop-4315557367717655828.jar 2024-12-07T17:23:26,109 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-07T17:23:26,110 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-07T17:23:26,110 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-07T17:23:26,110 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-07T17:23:26,110 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-07T17:23:26,111 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-07T17:23:26,111 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-07T17:23:26,112 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-07T17:23:26,112 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-07T17:23:26,112 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-07T17:23:26,113 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-07T17:23:26,113 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-07T17:23:26,113 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-07T17:23:26,113 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-07T17:23:26,114 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-07T17:23:26,114 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-07T17:23:26,114 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-07T17:23:26,114 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-07T17:23:26,115 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:23:26,115 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:23:26,115 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T17:23:26,116 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:23:26,116 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:23:26,116 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T17:23:26,117 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T17:23:26,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741923_1099 (size=127628) 2024-12-07T17:23:26,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741923_1099 (size=127628) 2024-12-07T17:23:26,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741923_1099 (size=127628) 2024-12-07T17:23:26,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741924_1100 (size=2172101) 2024-12-07T17:23:26,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741924_1100 (size=2172101) 2024-12-07T17:23:26,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741924_1100 (size=2172101) 2024-12-07T17:23:26,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741925_1101 (size=213228) 2024-12-07T17:23:26,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741925_1101 (size=213228) 2024-12-07T17:23:26,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741925_1101 (size=213228) 2024-12-07T17:23:26,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741926_1102 (size=1877034) 2024-12-07T17:23:26,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741926_1102 (size=1877034) 2024-12-07T17:23:26,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741926_1102 (size=1877034) 2024-12-07T17:23:26,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741927_1103 (size=533455) 2024-12-07T17:23:26,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741927_1103 (size=533455) 2024-12-07T17:23:26,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741927_1103 (size=533455) 2024-12-07T17:23:26,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741928_1104 (size=7280644) 2024-12-07T17:23:26,364 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741928_1104 (size=7280644) 2024-12-07T17:23:26,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741928_1104 (size=7280644) 2024-12-07T17:23:26,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741929_1105 (size=4188619) 2024-12-07T17:23:26,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741929_1105 (size=4188619) 2024-12-07T17:23:26,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741929_1105 (size=4188619) 2024-12-07T17:23:26,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741930_1106 (size=451756) 2024-12-07T17:23:26,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741930_1106 (size=451756) 2024-12-07T17:23:26,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741930_1106 (size=451756) 2024-12-07T17:23:26,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741931_1107 (size=20406) 2024-12-07T17:23:26,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741931_1107 (size=20406) 2024-12-07T17:23:26,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741931_1107 (size=20406) 2024-12-07T17:23:26,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741932_1108 (size=75495) 2024-12-07T17:23:26,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741932_1108 (size=75495) 2024-12-07T17:23:26,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741932_1108 (size=75495) 2024-12-07T17:23:26,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741933_1109 (size=45609) 2024-12-07T17:23:26,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741933_1109 (size=45609) 2024-12-07T17:23:26,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741933_1109 (size=45609) 2024-12-07T17:23:26,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741934_1110 (size=110084) 2024-12-07T17:23:26,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741934_1110 (size=110084) 2024-12-07T17:23:26,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741934_1110 (size=110084) 2024-12-07T17:23:26,503 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741935_1111 (size=1323991) 2024-12-07T17:23:26,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741935_1111 (size=1323991) 2024-12-07T17:23:26,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741935_1111 (size=1323991) 2024-12-07T17:23:26,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741936_1112 (size=23076) 2024-12-07T17:23:26,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741936_1112 (size=23076) 2024-12-07T17:23:26,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741936_1112 (size=23076) 2024-12-07T17:23:26,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741937_1113 (size=126803) 2024-12-07T17:23:26,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741937_1113 (size=126803) 2024-12-07T17:23:26,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741937_1113 (size=126803) 2024-12-07T17:23:26,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741938_1114 (size=322274) 2024-12-07T17:23:26,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741938_1114 (size=322274) 2024-12-07T17:23:26,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741938_1114 (size=322274) 2024-12-07T17:23:26,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741939_1115 (size=1832290) 2024-12-07T17:23:26,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741939_1115 (size=1832290) 2024-12-07T17:23:26,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741939_1115 (size=1832290) 2024-12-07T17:23:26,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741940_1116 (size=30081) 2024-12-07T17:23:26,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741940_1116 (size=30081) 2024-12-07T17:23:26,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741940_1116 (size=30081) 2024-12-07T17:23:26,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741941_1117 (size=53616) 2024-12-07T17:23:26,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741941_1117 (size=53616) 2024-12-07T17:23:26,664 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741941_1117 (size=53616) 2024-12-07T17:23:26,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741942_1118 (size=29229) 2024-12-07T17:23:26,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741942_1118 (size=29229) 2024-12-07T17:23:26,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741942_1118 (size=29229) 2024-12-07T17:23:26,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741943_1119 (size=169089) 2024-12-07T17:23:26,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741943_1119 (size=169089) 2024-12-07T17:23:26,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741943_1119 (size=169089) 2024-12-07T17:23:26,944 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T17:23:27,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741944_1120 (size=6350154) 2024-12-07T17:23:27,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741944_1120 (size=6350154) 2024-12-07T17:23:27,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741944_1120 (size=6350154) 2024-12-07T17:23:27,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741945_1121 (size=5175431) 2024-12-07T17:23:27,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741945_1121 (size=5175431) 2024-12-07T17:23:27,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741945_1121 (size=5175431) 2024-12-07T17:23:27,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741946_1122 (size=136454) 2024-12-07T17:23:27,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741946_1122 (size=136454) 2024-12-07T17:23:27,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741946_1122 (size=136454) 2024-12-07T17:23:27,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741947_1123 (size=907853) 2024-12-07T17:23:27,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741947_1123 (size=907853) 2024-12-07T17:23:27,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741947_1123 (size=907853) 2024-12-07T17:23:27,399 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741948_1124 (size=3317408) 2024-12-07T17:23:27,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741948_1124 (size=3317408) 2024-12-07T17:23:27,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741948_1124 (size=3317408) 2024-12-07T17:23:27,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741949_1125 (size=503880) 2024-12-07T17:23:27,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741949_1125 (size=503880) 2024-12-07T17:23:27,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741949_1125 (size=503880) 2024-12-07T17:23:27,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741950_1126 (size=4695811) 2024-12-07T17:23:27,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741950_1126 (size=4695811) 2024-12-07T17:23:27,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741950_1126 (size=4695811) 2024-12-07T17:23:27,523 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-07T17:23:27,527 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list 2024-12-07T17:23:27,529 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-07T17:23:27,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741951_1127 (size=324) 2024-12-07T17:23:27,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741951_1127 (size=324) 2024-12-07T17:23:27,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741951_1127 (size=324) 2024-12-07T17:23:27,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741952_1128 (size=15) 2024-12-07T17:23:27,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741952_1128 (size=15) 2024-12-07T17:23:27,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741952_1128 (size=15) 2024-12-07T17:23:27,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741953_1129 (size=304879) 2024-12-07T17:23:27,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741953_1129 (size=304879) 2024-12-07T17:23:27,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to 
blk_1073741953_1129 (size=304879) 2024-12-07T17:23:27,647 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-07T17:23:27,647 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-07T17:23:28,386 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0002_000001 (auth:SIMPLE) from 127.0.0.1:35980 2024-12-07T17:23:30,159 INFO [master/5c70517e8267:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-07T17:23:30,159 INFO [master/5c70517e8267:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-07T17:23:35,775 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0002_000001 (auth:SIMPLE) from 127.0.0.1:44138 2024-12-07T17:23:36,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741954_1130 (size=350553) 2024-12-07T17:23:36,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741954_1130 (size=350553) 2024-12-07T17:23:36,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741954_1130 (size=350553) 2024-12-07T17:23:38,140 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0002_000001 (auth:SIMPLE) from 127.0.0.1:42858 2024-12-07T17:23:42,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741955_1131 (size=8188) 2024-12-07T17:23:42,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741955_1131 (size=8188) 2024-12-07T17:23:42,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741955_1131 (size=8188) 2024-12-07T17:23:42,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741956_1132 (size=5422) 2024-12-07T17:23:42,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741956_1132 (size=5422) 2024-12-07T17:23:42,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741956_1132 (size=5422) 2024-12-07T17:23:42,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741957_1133 (size=17398) 2024-12-07T17:23:42,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741957_1133 (size=17398) 2024-12-07T17:23:42,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added 
to blk_1073741957_1133 (size=17398) 2024-12-07T17:23:42,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741958_1134 (size=461) 2024-12-07T17:23:42,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741958_1134 (size=461) 2024-12-07T17:23:42,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741958_1134 (size=461) 2024-12-07T17:23:42,451 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-1_0/usercache/jenkins/appcache/application_1733592173099_0002/container_1733592173099_0002_01_000002/launch_container.sh] 2024-12-07T17:23:42,451 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-1_0/usercache/jenkins/appcache/application_1733592173099_0002/container_1733592173099_0002_01_000002/container_tokens] 2024-12-07T17:23:42,451 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-1_0/usercache/jenkins/appcache/application_1733592173099_0002/container_1733592173099_0002_01_000002/sysfs] 2024-12-07T17:23:42,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741959_1135 (size=17398) 2024-12-07T17:23:42,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741959_1135 (size=17398) 2024-12-07T17:23:42,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741959_1135 (size=17398) 2024-12-07T17:23:42,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741960_1136 (size=350553) 2024-12-07T17:23:42,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741960_1136 (size=350553) 2024-12-07T17:23:42,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741960_1136 (size=350553) 2024-12-07T17:23:42,557 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0002_000001 (auth:SIMPLE) from 127.0.0.1:42868 2024-12-07T17:23:43,249 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See 
HBASE-27595 for details. 2024-12-07T17:23:44,117 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-07T17:23:44,118 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-07T17:23:44,127 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb-testExportWithResetTtl 2024-12-07T17:23:44,128 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-07T17:23:44,129 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-07T17:23:44,129 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1614647307_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-07T17:23:44,129 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-07T17:23:44,130 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-07T17:23:44,130 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1614647307_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592204687/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592204687/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-07T17:23:44,135 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592204687/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-07T17:23:44,135 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592204687/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-07T17:23:44,145 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testExportWithResetTtl 2024-12-07T17:23:44,146 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testExportWithResetTtl 2024-12-07T17:23:44,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testExportWithResetTtl 2024-12-07T17:23:44,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-07T17:23:44,150 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592224150"}]},"ts":"1733592224150"} 
2024-12-07T17:23:44,153 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-07T17:23:44,158 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING 2024-12-07T17:23:44,159 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-12-07T17:23:44,161 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=51, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=69368cd96183d60dedbf4e544ddc6bec, UNASSIGN}, {pid=52, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=07546b523f5cec3721a2f1e7b7bbbe84, UNASSIGN}] 2024-12-07T17:23:44,161 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=52, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=07546b523f5cec3721a2f1e7b7bbbe84, UNASSIGN 2024-12-07T17:23:44,162 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=51, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=69368cd96183d60dedbf4e544ddc6bec, UNASSIGN 2024-12-07T17:23:44,163 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=52 updating hbase:meta row=07546b523f5cec3721a2f1e7b7bbbe84, regionState=CLOSING, regionLocation=5c70517e8267,36163,1733592165482 2024-12-07T17:23:44,164 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=51 updating hbase:meta row=69368cd96183d60dedbf4e544ddc6bec, regionState=CLOSING, regionLocation=5c70517e8267,38423,1733592165361 2024-12-07T17:23:44,165 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T17:23:44,165 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=53, ppid=52, state=RUNNABLE; CloseRegionProcedure 07546b523f5cec3721a2f1e7b7bbbe84, server=5c70517e8267,36163,1733592165482}] 2024-12-07T17:23:44,167 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T17:23:44,167 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=51, state=RUNNABLE; CloseRegionProcedure 69368cd96183d60dedbf4e544ddc6bec, server=5c70517e8267,38423,1733592165361}] 2024-12-07T17:23:44,248 ERROR [ProcedureDispatcherTimeoutThread {}] procedure2.RemoteProcedureDispatcher$TimeoutExecutorThread(331): DelayQueue for RemoteProcedureDispatcher is not empty when timed waiting elapsed. If this is repeated consistently, it means no element is getting expired from the queue and it might freeze the system. 
Queue: [containedObject=5c70517e8267,36163,1733592165482, timeout=1733592224318, delay=70, operations=[pid=53, ppid=52, state=RUNNABLE; CloseRegionProcedure 07546b523f5cec3721a2f1e7b7bbbe84, server=5c70517e8267,36163,1733592165482], containedObject=5c70517e8267,38423,1733592165361, timeout=1733592224319, delay=71, operations=[pid=54, ppid=51, state=RUNNABLE; CloseRegionProcedure 69368cd96183d60dedbf4e544ddc6bec, server=5c70517e8267,38423,1733592165361]] 2024-12-07T17:23:44,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-07T17:23:44,319 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5c70517e8267,36163,1733592165482 2024-12-07T17:23:44,320 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5c70517e8267,38423,1733592165361 2024-12-07T17:23:44,320 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] handler.UnassignRegionHandler(124): Close 69368cd96183d60dedbf4e544ddc6bec 2024-12-07T17:23:44,321 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T17:23:44,321 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1681): Closing 69368cd96183d60dedbf4e544ddc6bec, disabling compactions & flushes 2024-12-07T17:23:44,321 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,,1733592203350.69368cd96183d60dedbf4e544ddc6bec. 2024-12-07T17:23:44,321 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,,1733592203350.69368cd96183d60dedbf4e544ddc6bec. 2024-12-07T17:23:44,321 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,,1733592203350.69368cd96183d60dedbf4e544ddc6bec. after waiting 0 ms 2024-12-07T17:23:44,321 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,,1733592203350.69368cd96183d60dedbf4e544ddc6bec. 2024-12-07T17:23:44,324 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] handler.UnassignRegionHandler(124): Close 07546b523f5cec3721a2f1e7b7bbbe84 2024-12-07T17:23:44,324 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T17:23:44,324 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1681): Closing 07546b523f5cec3721a2f1e7b7bbbe84, disabling compactions & flushes 2024-12-07T17:23:44,324 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,1,1733592203350.07546b523f5cec3721a2f1e7b7bbbe84. 
2024-12-07T17:23:44,324 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,1,1733592203350.07546b523f5cec3721a2f1e7b7bbbe84. 2024-12-07T17:23:44,325 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,1,1733592203350.07546b523f5cec3721a2f1e7b7bbbe84. after waiting 0 ms 2024-12-07T17:23:44,325 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,1,1733592203350.07546b523f5cec3721a2f1e7b7bbbe84. 2024-12-07T17:23:44,353 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportWithResetTtl/69368cd96183d60dedbf4e544ddc6bec/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-07T17:23:44,354 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:23:44,354 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1922): Closed testExportWithResetTtl,,1733592203350.69368cd96183d60dedbf4e544ddc6bec. 2024-12-07T17:23:44,355 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1635): Region close journal for 69368cd96183d60dedbf4e544ddc6bec: 2024-12-07T17:23:44,356 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportWithResetTtl/07546b523f5cec3721a2f1e7b7bbbe84/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-07T17:23:44,357 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] handler.UnassignRegionHandler(170): Closed 69368cd96183d60dedbf4e544ddc6bec 2024-12-07T17:23:44,357 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:23:44,358 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1922): Closed testExportWithResetTtl,1,1733592203350.07546b523f5cec3721a2f1e7b7bbbe84. 
2024-12-07T17:23:44,358 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1635): Region close journal for 07546b523f5cec3721a2f1e7b7bbbe84: 2024-12-07T17:23:44,358 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=51 updating hbase:meta row=69368cd96183d60dedbf4e544ddc6bec, regionState=CLOSED 2024-12-07T17:23:44,360 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] handler.UnassignRegionHandler(170): Closed 07546b523f5cec3721a2f1e7b7bbbe84 2024-12-07T17:23:44,361 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=52 updating hbase:meta row=07546b523f5cec3721a2f1e7b7bbbe84, regionState=CLOSED 2024-12-07T17:23:44,365 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=51 2024-12-07T17:23:44,368 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=53, resume processing ppid=52 2024-12-07T17:23:44,368 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, ppid=52, state=SUCCESS; CloseRegionProcedure 07546b523f5cec3721a2f1e7b7bbbe84, server=5c70517e8267,36163,1733592165482 in 199 msec 2024-12-07T17:23:44,369 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, ppid=50, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=69368cd96183d60dedbf4e544ddc6bec, UNASSIGN in 205 msec 2024-12-07T17:23:44,369 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=51, state=SUCCESS; CloseRegionProcedure 69368cd96183d60dedbf4e544ddc6bec, server=5c70517e8267,38423,1733592165361 in 194 msec 2024-12-07T17:23:44,372 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=50 2024-12-07T17:23:44,372 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=50, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=07546b523f5cec3721a2f1e7b7bbbe84, UNASSIGN in 208 msec 2024-12-07T17:23:44,374 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-12-07T17:23:44,374 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; CloseTableRegionsProcedure table=testExportWithResetTtl in 213 msec 2024-12-07T17:23:44,376 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592224376"}]},"ts":"1733592224376"} 2024-12-07T17:23:44,378 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-07T17:23:44,381 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-12-07T17:23:44,384 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; DisableTableProcedure table=testExportWithResetTtl in 236 msec 2024-12-07T17:23:44,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-07T17:23:44,454 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testExportWithResetTtl, procId: 49 completed 2024-12-07T17:23:44,455 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster$5(2505): 
Client=jenkins//172.17.0.2 delete testExportWithResetTtl 2024-12-07T17:23:44,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testExportWithResetTtl 2024-12-07T17:23:44,457 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=55, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-07T17:23:44,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] access.PermissionStorage(259): Removing permissions of removed table testExportWithResetTtl 2024-12-07T17:23:44,458 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=55, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-07T17:23:44,460 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36163 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-12-07T17:23:44,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-07T17:23:44,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-07T17:23:44,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-07T17:23:44,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-07T17:23:44,467 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportWithResetTtl/69368cd96183d60dedbf4e544ddc6bec 2024-12-07T17:23:44,472 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data null 2024-12-07T17:23:44,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:23:44,472 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-07T17:23:44,472 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data null 2024-12-07T17:23:44,472 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-07T17:23:44,472 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data null 
2024-12-07T17:23:44,472 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-07T17:23:44,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:23:44,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:23:44,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:23:44,473 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data null 2024-12-07T17:23:44,473 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-07T17:23:44,474 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-07T17:23:44,475 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-07T17:23:44,475 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-07T17:23:44,476 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-07T17:23:44,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-07T17:23:44,478 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportWithResetTtl/69368cd96183d60dedbf4e544ddc6bec/cf, FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportWithResetTtl/69368cd96183d60dedbf4e544ddc6bec/recovered.edits] 2024-12-07T17:23:44,480 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportWithResetTtl/07546b523f5cec3721a2f1e7b7bbbe84 2024-12-07T17:23:44,483 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportWithResetTtl/07546b523f5cec3721a2f1e7b7bbbe84/cf, FileablePath, 
hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportWithResetTtl/07546b523f5cec3721a2f1e7b7bbbe84/recovered.edits] 2024-12-07T17:23:44,489 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportWithResetTtl/69368cd96183d60dedbf4e544ddc6bec/cf/05382327c7704055b889b7ede1f8eb1f to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testExportWithResetTtl/69368cd96183d60dedbf4e544ddc6bec/cf/05382327c7704055b889b7ede1f8eb1f 2024-12-07T17:23:44,492 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportWithResetTtl/07546b523f5cec3721a2f1e7b7bbbe84/cf/45b5a6447ae945d48d2b87d913fb3bae to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testExportWithResetTtl/07546b523f5cec3721a2f1e7b7bbbe84/cf/45b5a6447ae945d48d2b87d913fb3bae 2024-12-07T17:23:44,494 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportWithResetTtl/69368cd96183d60dedbf4e544ddc6bec/recovered.edits/8.seqid to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testExportWithResetTtl/69368cd96183d60dedbf4e544ddc6bec/recovered.edits/8.seqid 2024-12-07T17:23:44,495 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportWithResetTtl/69368cd96183d60dedbf4e544ddc6bec 2024-12-07T17:23:44,497 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportWithResetTtl/07546b523f5cec3721a2f1e7b7bbbe84/recovered.edits/8.seqid to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testExportWithResetTtl/07546b523f5cec3721a2f1e7b7bbbe84/recovered.edits/8.seqid 2024-12-07T17:23:44,497 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportWithResetTtl/07546b523f5cec3721a2f1e7b7bbbe84 2024-12-07T17:23:44,498 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl regions 2024-12-07T17:23:44,501 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=55, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-07T17:23:44,510 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-12-07T17:23:44,514 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testExportWithResetTtl' descriptor. 
2024-12-07T17:23:44,516 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=55, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-07T17:23:44,516 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testExportWithResetTtl' from region states. 2024-12-07T17:23:44,517 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1733592203350.69368cd96183d60dedbf4e544ddc6bec.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733592224516"}]},"ts":"9223372036854775807"} 2024-12-07T17:23:44,517 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1733592203350.07546b523f5cec3721a2f1e7b7bbbe84.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733592224516"}]},"ts":"9223372036854775807"} 2024-12-07T17:23:44,522 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-07T17:23:44,522 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 69368cd96183d60dedbf4e544ddc6bec, NAME => 'testExportWithResetTtl,,1733592203350.69368cd96183d60dedbf4e544ddc6bec.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 07546b523f5cec3721a2f1e7b7bbbe84, NAME => 'testExportWithResetTtl,1,1733592203350.07546b523f5cec3721a2f1e7b7bbbe84.', STARTKEY => '1', ENDKEY => ''}] 2024-12-07T17:23:44,522 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testExportWithResetTtl' as deleted. 2024-12-07T17:23:44,522 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733592224522"}]},"ts":"9223372036854775807"} 2024-12-07T17:23:44,530 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table testExportWithResetTtl state from META 2024-12-07T17:23:44,534 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=55, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-07T17:23:44,537 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; DeleteTableProcedure table=testExportWithResetTtl in 80 msec 2024-12-07T17:23:44,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-07T17:23:44,578 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testExportWithResetTtl, procId: 55 completed 2024-12-07T17:23:44,579 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportWithResetTtl 2024-12-07T17:23:44,579 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportWithResetTtl 2024-12-07T17:23:44,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=56, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-12-07T17:23:44,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-07T17:23:44,590 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put 
{"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592224589"}]},"ts":"1733592224589"} 2024-12-07T17:23:44,591 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-07T17:23:44,596 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithResetTtl to state=DISABLING 2024-12-07T17:23:44,597 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=57, ppid=56, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-12-07T17:23:44,599 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=b015e76782accb7dfc05754188145804, UNASSIGN}, {pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=a1332a7519281371dde400bf41911c38, UNASSIGN}] 2024-12-07T17:23:44,600 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=a1332a7519281371dde400bf41911c38, UNASSIGN 2024-12-07T17:23:44,600 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=b015e76782accb7dfc05754188145804, UNASSIGN 2024-12-07T17:23:44,601 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=59 updating hbase:meta row=a1332a7519281371dde400bf41911c38, regionState=CLOSING, regionLocation=5c70517e8267,38423,1733592165361 2024-12-07T17:23:44,601 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=58 updating hbase:meta row=b015e76782accb7dfc05754188145804, regionState=CLOSING, regionLocation=5c70517e8267,36163,1733592165482 2024-12-07T17:23:44,604 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T17:23:44,604 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=58, state=RUNNABLE; CloseRegionProcedure b015e76782accb7dfc05754188145804, server=5c70517e8267,36163,1733592165482}] 2024-12-07T17:23:44,605 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T17:23:44,606 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=61, ppid=59, state=RUNNABLE; CloseRegionProcedure a1332a7519281371dde400bf41911c38, server=5c70517e8267,38423,1733592165361}] 2024-12-07T17:23:44,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-07T17:23:44,756 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5c70517e8267,36163,1733592165482 2024-12-07T17:23:44,757 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(124): Close b015e76782accb7dfc05754188145804 2024-12-07T17:23:44,757 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 
{event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T17:23:44,757 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1681): Closing b015e76782accb7dfc05754188145804, disabling compactions & flushes 2024-12-07T17:23:44,757 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,,1733592201717.b015e76782accb7dfc05754188145804. 2024-12-07T17:23:44,758 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,,1733592201717.b015e76782accb7dfc05754188145804. 2024-12-07T17:23:44,758 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,,1733592201717.b015e76782accb7dfc05754188145804. after waiting 0 ms 2024-12-07T17:23:44,758 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,,1733592201717.b015e76782accb7dfc05754188145804. 2024-12-07T17:23:44,759 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5c70517e8267,38423,1733592165361 2024-12-07T17:23:44,760 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(124): Close a1332a7519281371dde400bf41911c38 2024-12-07T17:23:44,761 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T17:23:44,761 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1681): Closing a1332a7519281371dde400bf41911c38, disabling compactions & flushes 2024-12-07T17:23:44,761 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,1,1733592201717.a1332a7519281371dde400bf41911c38. 2024-12-07T17:23:44,761 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,1,1733592201717.a1332a7519281371dde400bf41911c38. 2024-12-07T17:23:44,761 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,1,1733592201717.a1332a7519281371dde400bf41911c38. after waiting 0 ms 2024-12-07T17:23:44,761 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,1,1733592201717.a1332a7519281371dde400bf41911c38. 
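The entries above trace pid=56 (DisableTableProcedure) fanning out into CloseTableRegionsProcedure (pid=57) and per-region TransitRegionStateProcedure/CloseRegionProcedure children (pids 58-61) that move both regions through CLOSING on their hosting region servers. As a point of reference only, the following is a minimal client-side sketch, assuming the standard HBase 2.x Admin API (configuration and connection details are placeholders; the table name is taken from the log), of the call that initiates this flow:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
      // Blocks until the master-side DisableTableProcedure (and its region-close
      // subprocedures, as seen in the log above) reports completion.
      admin.disableTable(table);
    }
  }
}
```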
2024-12-07T17:23:44,779 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithResetTtl/b015e76782accb7dfc05754188145804/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T17:23:44,780 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:23:44,780 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,,1733592201717.b015e76782accb7dfc05754188145804. 2024-12-07T17:23:44,780 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1635): Region close journal for b015e76782accb7dfc05754188145804: 2024-12-07T17:23:44,788 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(170): Closed b015e76782accb7dfc05754188145804 2024-12-07T17:23:44,788 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=58 updating hbase:meta row=b015e76782accb7dfc05754188145804, regionState=CLOSED 2024-12-07T17:23:44,793 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=58 2024-12-07T17:23:44,794 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=58, state=SUCCESS; CloseRegionProcedure b015e76782accb7dfc05754188145804, server=5c70517e8267,36163,1733592165482 in 187 msec 2024-12-07T17:23:44,795 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=b015e76782accb7dfc05754188145804, UNASSIGN in 195 msec 2024-12-07T17:23:44,798 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithResetTtl/a1332a7519281371dde400bf41911c38/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T17:23:44,799 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:23:44,799 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,1,1733592201717.a1332a7519281371dde400bf41911c38. 
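The repeated "Checking to see if procedure is done pid=56" lines are the client polling the master for procedure completion. A hedged sketch of the equivalent non-blocking form of the same operation, using the public Admin interface's Future-returning variant (nothing here is specific to this test; the Admin handle is assumed to be open as in the previous sketch):

```java
import java.util.concurrent.Future;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class DisableAsyncExample {
  static void disableAndWait(Admin admin) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
    Future<Void> f = admin.disableTableAsync(table); // submits the procedure and returns immediately
    f.get();                                         // waits for the master to finish it
    assert admin.isTableDisabled(table);             // state=DISABLED, as written to hbase:meta above
  }
}
```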
2024-12-07T17:23:44,799 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1635): Region close journal for a1332a7519281371dde400bf41911c38: 2024-12-07T17:23:44,801 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(170): Closed a1332a7519281371dde400bf41911c38 2024-12-07T17:23:44,806 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=59 updating hbase:meta row=a1332a7519281371dde400bf41911c38, regionState=CLOSED 2024-12-07T17:23:44,810 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=61, resume processing ppid=59 2024-12-07T17:23:44,810 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, ppid=59, state=SUCCESS; CloseRegionProcedure a1332a7519281371dde400bf41911c38, server=5c70517e8267,38423,1733592165361 in 202 msec 2024-12-07T17:23:44,812 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=59, resume processing ppid=57 2024-12-07T17:23:44,812 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, ppid=57, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=a1332a7519281371dde400bf41911c38, UNASSIGN in 211 msec 2024-12-07T17:23:44,815 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=57, resume processing ppid=56 2024-12-07T17:23:44,815 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, ppid=56, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 216 msec 2024-12-07T17:23:44,817 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592224817"}]},"ts":"1733592224817"} 2024-12-07T17:23:44,819 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-07T17:23:44,821 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 2024-12-07T17:23:44,824 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, state=SUCCESS; DisableTableProcedure table=testtb-testExportWithResetTtl in 243 msec 2024-12-07T17:23:44,860 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-07T17:23:44,861 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-07T17:23:44,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-07T17:23:44,896 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl, procId: 56 completed 2024-12-07T17:23:44,898 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportWithResetTtl 2024-12-07T17:23:44,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure 
table=testtb-testExportWithResetTtl 2024-12-07T17:23:44,900 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-07T17:23:44,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportWithResetTtl 2024-12-07T17:23:44,902 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36163 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-12-07T17:23:44,904 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=62, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-07T17:23:44,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-07T17:23:44,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-07T17:23:44,907 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-07T17:23:44,907 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-07T17:23:44,907 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-07T17:23:44,907 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-07T17:23:44,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:23:44,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:23:44,908 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data null 2024-12-07T17:23:44,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-07T17:23:44,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:23:44,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-07T17:23:44,908 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-07T17:23:44,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:23:44,909 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data null 2024-12-07T17:23:44,909 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-07T17:23:44,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=62 2024-12-07T17:23:44,910 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithResetTtl/a1332a7519281371dde400bf41911c38 2024-12-07T17:23:44,912 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithResetTtl/b015e76782accb7dfc05754188145804 2024-12-07T17:23:44,913 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithResetTtl/a1332a7519281371dde400bf41911c38/cf, FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithResetTtl/a1332a7519281371dde400bf41911c38/recovered.edits] 2024-12-07T17:23:44,915 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithResetTtl/b015e76782accb7dfc05754188145804/cf, FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithResetTtl/b015e76782accb7dfc05754188145804/recovered.edits] 2024-12-07T17:23:44,925 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithResetTtl/a1332a7519281371dde400bf41911c38/cf/caeccdd612984aec8d8892411a0e2690 to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportWithResetTtl/a1332a7519281371dde400bf41911c38/cf/caeccdd612984aec8d8892411a0e2690 2024-12-07T17:23:44,927 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithResetTtl/b015e76782accb7dfc05754188145804/cf/1bbe96cd9d9c4d8a936c5e9b91f64e13 to 
hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportWithResetTtl/b015e76782accb7dfc05754188145804/cf/1bbe96cd9d9c4d8a936c5e9b91f64e13 2024-12-07T17:23:44,930 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithResetTtl/a1332a7519281371dde400bf41911c38/recovered.edits/9.seqid to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportWithResetTtl/a1332a7519281371dde400bf41911c38/recovered.edits/9.seqid 2024-12-07T17:23:44,930 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithResetTtl/a1332a7519281371dde400bf41911c38 2024-12-07T17:23:44,932 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithResetTtl/b015e76782accb7dfc05754188145804/recovered.edits/9.seqid to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportWithResetTtl/b015e76782accb7dfc05754188145804/recovered.edits/9.seqid 2024-12-07T17:23:44,932 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithResetTtl/b015e76782accb7dfc05754188145804 2024-12-07T17:23:44,932 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-12-07T17:23:44,935 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=62, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-07T17:23:44,940 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-12-07T17:23:44,942 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithResetTtl' descriptor. 2024-12-07T17:23:44,943 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=62, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-07T17:23:44,943 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithResetTtl' from region states. 
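At this point pid=62 (DeleteTableProcedure) has dropped the table's ACL entry, archived each region's store files and recovered.edits under the archive directory, and is about to remove the region rows and table state from hbase:meta; the test then deletes its three snapshots. A minimal sketch of the teardown such a test performs, again assuming the standard Admin API with an open Admin handle (table and snapshot names are taken from the log):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class TeardownExample {
  static void cleanUp(Admin admin) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
    if (admin.tableExists(table)) {
      if (!admin.isTableDisabled(table)) {
        admin.disableTable(table);   // a table must be disabled before it can be deleted
      }
      admin.deleteTable(table);      // drives the DeleteTableProcedure seen above
    }
    // Snapshot cleanup, matching the "Deleting snapshot: ..." entries below.
    admin.deleteSnapshot("emptySnaptb0-testExportWithResetTtl");
    admin.deleteSnapshot("snaptb-testExportWithResetTtl");
    admin.deleteSnapshot("snaptb0-testExportWithResetTtl");
  }
}
```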
2024-12-07T17:23:44,944 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1733592201717.b015e76782accb7dfc05754188145804.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733592224943"}]},"ts":"9223372036854775807"} 2024-12-07T17:23:44,944 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1733592201717.a1332a7519281371dde400bf41911c38.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733592224943"}]},"ts":"9223372036854775807"} 2024-12-07T17:23:44,946 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-07T17:23:44,946 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => b015e76782accb7dfc05754188145804, NAME => 'testtb-testExportWithResetTtl,,1733592201717.b015e76782accb7dfc05754188145804.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => a1332a7519281371dde400bf41911c38, NAME => 'testtb-testExportWithResetTtl,1,1733592201717.a1332a7519281371dde400bf41911c38.', STARTKEY => '1', ENDKEY => ''}] 2024-12-07T17:23:44,946 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithResetTtl' as deleted. 2024-12-07T17:23:44,946 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733592224946"}]},"ts":"9223372036854775807"} 2024-12-07T17:23:44,948 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportWithResetTtl state from META 2024-12-07T17:23:44,951 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=62, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-07T17:23:44,953 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, state=SUCCESS; DeleteTableProcedure table=testtb-testExportWithResetTtl in 53 msec 2024-12-07T17:23:45,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=62 2024-12-07T17:23:45,011 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl, procId: 62 completed 2024-12-07T17:23:45,023 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithResetTtl" 2024-12-07T17:23:45,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-12-07T17:23:45,027 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb-testExportWithResetTtl" 2024-12-07T17:23:45,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb-testExportWithResetTtl 2024-12-07T17:23:45,033 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithResetTtl" 2024-12-07T17:23:45,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-12-07T17:23:45,064 INFO 
[Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=783 (was 774) Potentially hanging thread: process reaper (pid 21852) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xeb8403e-shared-pool-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1173471231) connection to localhost/127.0.0.1:42513 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_299578331_1 at /127.0.0.1:41824 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_299578331_1 at /127.0.0.1:47400 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42513 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xeb8403e-shared-pool-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xeb8403e-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1614647307_22 at /127.0.0.1:56258 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xeb8403e-shared-pool-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xeb8403e-shared-pool-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xeb8403e-shared-pool-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1614647307_22 at /127.0.0.1:54314 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40703 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2146 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) 
java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1614647307_22 at /127.0.0.1:32948 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=791 (was 797), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=584 (was 539) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 17), AvailableMemoryMB=5349 (was 5652) 2024-12-07T17:23:45,064 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=783 is superior to 500 2024-12-07T17:23:45,089 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=783, OpenFileDescriptor=791, MaxFileDescriptor=1048576, SystemLoadAverage=584, ProcessCount=17, AvailableMemoryMB=5348 2024-12-07T17:23:45,089 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=783 is superior to 500 2024-12-07T17:23:45,091 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T17:23:45,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemState 2024-12-07T17:23:45,094 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T17:23:45,094 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:23:45,094 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure 
request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 63 2024-12-07T17:23:45,095 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T17:23:45,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-07T17:23:45,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741961_1137 (size=407) 2024-12-07T17:23:45,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741961_1137 (size=407) 2024-12-07T17:23:45,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741961_1137 (size=407) 2024-12-07T17:23:45,112 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 67d849137cd823427d9f84b3054e3036, NAME => 'testtb-testExportFileSystemState,,1733592225091.67d849137cd823427d9f84b3054e3036.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:23:45,112 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 9ff8778d1820ef63e4a5355c41b0a4fa, NAME => 'testtb-testExportFileSystemState,1,1733592225091.9ff8778d1820ef63e4a5355c41b0a4fa.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:23:45,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741962_1138 (size=68) 2024-12-07T17:23:45,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741962_1138 (size=68) 2024-12-07T17:23:45,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741962_1138 (size=68) 2024-12-07T17:23:45,132 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,,1733592225091.67d849137cd823427d9f84b3054e3036.; StoreHotnessProtector, 
parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:23:45,132 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1681): Closing 67d849137cd823427d9f84b3054e3036, disabling compactions & flushes 2024-12-07T17:23:45,132 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,,1733592225091.67d849137cd823427d9f84b3054e3036. 2024-12-07T17:23:45,132 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,,1733592225091.67d849137cd823427d9f84b3054e3036. 2024-12-07T17:23:45,132 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,,1733592225091.67d849137cd823427d9f84b3054e3036. after waiting 0 ms 2024-12-07T17:23:45,132 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,,1733592225091.67d849137cd823427d9f84b3054e3036. 2024-12-07T17:23:45,133 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,,1733592225091.67d849137cd823427d9f84b3054e3036. 2024-12-07T17:23:45,133 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1635): Region close journal for 67d849137cd823427d9f84b3054e3036: 2024-12-07T17:23:45,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741963_1139 (size=68) 2024-12-07T17:23:45,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741963_1139 (size=68) 2024-12-07T17:23:45,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741963_1139 (size=68) 2024-12-07T17:23:45,136 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,1,1733592225091.9ff8778d1820ef63e4a5355c41b0a4fa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:23:45,137 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1681): Closing 9ff8778d1820ef63e4a5355c41b0a4fa, disabling compactions & flushes 2024-12-07T17:23:45,137 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,1,1733592225091.9ff8778d1820ef63e4a5355c41b0a4fa. 2024-12-07T17:23:45,137 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,1,1733592225091.9ff8778d1820ef63e4a5355c41b0a4fa. 2024-12-07T17:23:45,137 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,1,1733592225091.9ff8778d1820ef63e4a5355c41b0a4fa. 
after waiting 0 ms 2024-12-07T17:23:45,137 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,1,1733592225091.9ff8778d1820ef63e4a5355c41b0a4fa. 2024-12-07T17:23:45,137 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,1,1733592225091.9ff8778d1820ef63e4a5355c41b0a4fa. 2024-12-07T17:23:45,137 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1635): Region close journal for 9ff8778d1820ef63e4a5355c41b0a4fa: 2024-12-07T17:23:45,138 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T17:23:45,139 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1733592225091.67d849137cd823427d9f84b3054e3036.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733592225138"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733592225138"}]},"ts":"1733592225138"} 2024-12-07T17:23:45,139 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1733592225091.9ff8778d1820ef63e4a5355c41b0a4fa.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733592225138"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733592225138"}]},"ts":"1733592225138"} 2024-12-07T17:23:45,142 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-07T17:23:45,144 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T17:23:45,144 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592225144"}]},"ts":"1733592225144"} 2024-12-07T17:23:45,146 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-12-07T17:23:45,150 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(202): Hosts are {5c70517e8267=0} racks are {/default-rack=0} 2024-12-07T17:23:45,152 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-07T17:23:45,152 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-07T17:23:45,152 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-07T17:23:45,152 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-07T17:23:45,152 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-07T17:23:45,152 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-07T17:23:45,152 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T17:23:45,153 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, 
region=67d849137cd823427d9f84b3054e3036, ASSIGN}, {pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=9ff8778d1820ef63e4a5355c41b0a4fa, ASSIGN}] 2024-12-07T17:23:45,154 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=67d849137cd823427d9f84b3054e3036, ASSIGN 2024-12-07T17:23:45,154 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=9ff8778d1820ef63e4a5355c41b0a4fa, ASSIGN 2024-12-07T17:23:45,155 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=67d849137cd823427d9f84b3054e3036, ASSIGN; state=OFFLINE, location=5c70517e8267,36163,1733592165482; forceNewPlan=false, retain=false 2024-12-07T17:23:45,155 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=9ff8778d1820ef63e4a5355c41b0a4fa, ASSIGN; state=OFFLINE, location=5c70517e8267,38423,1733592165361; forceNewPlan=false, retain=false 2024-12-07T17:23:45,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-07T17:23:45,306 INFO [5c70517e8267:36895 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-07T17:23:45,307 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=64 updating hbase:meta row=67d849137cd823427d9f84b3054e3036, regionState=OPENING, regionLocation=5c70517e8267,36163,1733592165482 2024-12-07T17:23:45,307 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=9ff8778d1820ef63e4a5355c41b0a4fa, regionState=OPENING, regionLocation=5c70517e8267,38423,1733592165361 2024-12-07T17:23:45,309 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=64, state=RUNNABLE; OpenRegionProcedure 67d849137cd823427d9f84b3054e3036, server=5c70517e8267,36163,1733592165482}] 2024-12-07T17:23:45,310 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=67, ppid=65, state=RUNNABLE; OpenRegionProcedure 9ff8778d1820ef63e4a5355c41b0a4fa, server=5c70517e8267,38423,1733592165361}] 2024-12-07T17:23:45,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-07T17:23:45,461 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5c70517e8267,36163,1733592165482 2024-12-07T17:23:45,462 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5c70517e8267,38423,1733592165361 2024-12-07T17:23:45,465 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemState,,1733592225091.67d849137cd823427d9f84b3054e3036. 
2024-12-07T17:23:45,465 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemState,1,1733592225091.9ff8778d1820ef63e4a5355c41b0a4fa. 2024-12-07T17:23:45,466 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7285): Opening region: {ENCODED => 67d849137cd823427d9f84b3054e3036, NAME => 'testtb-testExportFileSystemState,,1733592225091.67d849137cd823427d9f84b3054e3036.', STARTKEY => '', ENDKEY => '1'} 2024-12-07T17:23:45,466 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7285): Opening region: {ENCODED => 9ff8778d1820ef63e4a5355c41b0a4fa, NAME => 'testtb-testExportFileSystemState,1,1733592225091.9ff8778d1820ef63e4a5355c41b0a4fa.', STARTKEY => '1', ENDKEY => ''} 2024-12-07T17:23:45,466 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1733592225091.9ff8778d1820ef63e4a5355c41b0a4fa. service=AccessControlService 2024-12-07T17:23:45,466 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemState,,1733592225091.67d849137cd823427d9f84b3054e3036. service=AccessControlService 2024-12-07T17:23:45,466 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T17:23:45,466 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-07T17:23:45,467 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 9ff8778d1820ef63e4a5355c41b0a4fa 2024-12-07T17:23:45,467 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 67d849137cd823427d9f84b3054e3036 2024-12-07T17:23:45,467 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,1,1733592225091.9ff8778d1820ef63e4a5355c41b0a4fa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:23:45,467 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,,1733592225091.67d849137cd823427d9f84b3054e3036.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:23:45,467 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7327): checking encryption for 67d849137cd823427d9f84b3054e3036 2024-12-07T17:23:45,467 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7327): checking encryption for 9ff8778d1820ef63e4a5355c41b0a4fa 2024-12-07T17:23:45,467 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7330): checking classloading for 67d849137cd823427d9f84b3054e3036 2024-12-07T17:23:45,467 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7330): checking classloading for 9ff8778d1820ef63e4a5355c41b0a4fa 2024-12-07T17:23:45,469 INFO [StoreOpener-67d849137cd823427d9f84b3054e3036-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 67d849137cd823427d9f84b3054e3036 2024-12-07T17:23:45,469 INFO [StoreOpener-9ff8778d1820ef63e4a5355c41b0a4fa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 9ff8778d1820ef63e4a5355c41b0a4fa 2024-12-07T17:23:45,470 INFO [StoreOpener-9ff8778d1820ef63e4a5355c41b0a4fa-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
9ff8778d1820ef63e4a5355c41b0a4fa columnFamilyName cf 2024-12-07T17:23:45,471 INFO [StoreOpener-67d849137cd823427d9f84b3054e3036-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 67d849137cd823427d9f84b3054e3036 columnFamilyName cf 2024-12-07T17:23:45,471 DEBUG [StoreOpener-9ff8778d1820ef63e4a5355c41b0a4fa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:23:45,471 DEBUG [StoreOpener-67d849137cd823427d9f84b3054e3036-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:23:45,471 INFO [StoreOpener-67d849137cd823427d9f84b3054e3036-1 {}] regionserver.HStore(327): Store=67d849137cd823427d9f84b3054e3036/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:23:45,471 INFO [StoreOpener-9ff8778d1820ef63e4a5355c41b0a4fa-1 {}] regionserver.HStore(327): Store=9ff8778d1820ef63e4a5355c41b0a4fa/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:23:45,472 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemState/9ff8778d1820ef63e4a5355c41b0a4fa 2024-12-07T17:23:45,473 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemState/67d849137cd823427d9f84b3054e3036 2024-12-07T17:23:45,473 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemState/9ff8778d1820ef63e4a5355c41b0a4fa 2024-12-07T17:23:45,473 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemState/67d849137cd823427d9f84b3054e3036 2024-12-07T17:23:45,475 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1085): writing seq id for 67d849137cd823427d9f84b3054e3036 2024-12-07T17:23:45,475 DEBUG 
[RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1085): writing seq id for 9ff8778d1820ef63e4a5355c41b0a4fa 2024-12-07T17:23:45,478 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemState/67d849137cd823427d9f84b3054e3036/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T17:23:45,479 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemState/9ff8778d1820ef63e4a5355c41b0a4fa/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T17:23:45,479 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1102): Opened 67d849137cd823427d9f84b3054e3036; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63776429, jitterRate=-0.049657151103019714}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T17:23:45,479 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1102): Opened 9ff8778d1820ef63e4a5355c41b0a4fa; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61166880, jitterRate=-0.08854246139526367}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T17:23:45,480 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1001): Region open journal for 9ff8778d1820ef63e4a5355c41b0a4fa: 2024-12-07T17:23:45,480 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1001): Region open journal for 67d849137cd823427d9f84b3054e3036: 2024-12-07T17:23:45,481 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemState,1,1733592225091.9ff8778d1820ef63e4a5355c41b0a4fa., pid=67, masterSystemTime=1733592225462 2024-12-07T17:23:45,481 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemState,,1733592225091.67d849137cd823427d9f84b3054e3036., pid=66, masterSystemTime=1733592225461 2024-12-07T17:23:45,482 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemState,1,1733592225091.9ff8778d1820ef63e4a5355c41b0a4fa. 2024-12-07T17:23:45,482 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemState,1,1733592225091.9ff8778d1820ef63e4a5355c41b0a4fa. 
2024-12-07T17:23:45,483 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=9ff8778d1820ef63e4a5355c41b0a4fa, regionState=OPEN, openSeqNum=2, regionLocation=5c70517e8267,38423,1733592165361 2024-12-07T17:23:45,483 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemState,,1733592225091.67d849137cd823427d9f84b3054e3036. 2024-12-07T17:23:45,483 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemState,,1733592225091.67d849137cd823427d9f84b3054e3036. 2024-12-07T17:23:45,484 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=64 updating hbase:meta row=67d849137cd823427d9f84b3054e3036, regionState=OPEN, openSeqNum=2, regionLocation=5c70517e8267,36163,1733592165482 2024-12-07T17:23:45,486 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=67, resume processing ppid=65 2024-12-07T17:23:45,486 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, ppid=65, state=SUCCESS; OpenRegionProcedure 9ff8778d1820ef63e4a5355c41b0a4fa, server=5c70517e8267,38423,1733592165361 in 175 msec 2024-12-07T17:23:45,488 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=64 2024-12-07T17:23:45,488 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=64, state=SUCCESS; OpenRegionProcedure 67d849137cd823427d9f84b3054e3036, server=5c70517e8267,36163,1733592165482 in 177 msec 2024-12-07T17:23:45,489 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, ppid=63, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=9ff8778d1820ef63e4a5355c41b0a4fa, ASSIGN in 333 msec 2024-12-07T17:23:45,490 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-12-07T17:23:45,490 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=67d849137cd823427d9f84b3054e3036, ASSIGN in 335 msec 2024-12-07T17:23:45,491 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T17:23:45,491 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592225491"}]},"ts":"1733592225491"} 2024-12-07T17:23:45,492 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-12-07T17:23:45,495 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T17:23:45,495 DEBUG [PEWorker-3 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-12-07T17:23:45,498 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36163 {}] access.PermissionStorage(611): Read acl: 
entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-07T17:23:45,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:23:45,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:23:45,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:23:45,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:23:45,501 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-07T17:23:45,501 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-07T17:23:45,502 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-07T17:23:45,502 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-07T17:23:45,503 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemState in 411 msec 2024-12-07T17:23:45,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-07T17:23:45,700 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState, procId: 63 completed 2024-12-07T17:23:45,700 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportFileSystemState get assigned. Timeout = 60000ms 2024-12-07T17:23:45,700 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:23:45,705 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportFileSystemState assigned to meta. Checking AM states. 
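The entries above trace CreateTableProcedure pid=63 from CREATE_TABLE_WRITE_FS_LAYOUT through region assignment and ACL setup (owner jenkins granted RWXCA) to the client-side "Operation: CREATE ... procId: 63 completed" confirmation. As a reading aid, here is a minimal client-side sketch of an equivalent request against the HBase 2.x Admin API; the table name, the single 'cf' family with VERSIONS => '1', and the one split key '1' (which yields the two regions ['', '1') and ['1', '') seen above) are taken from the log, while the class name and connection handling are illustrative assumptions, not the test's actual code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("default", "testtb-testExportFileSystemState");
      // Single family 'cf' with max versions 1, matching the descriptor logged above.
      TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(tn)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .build());
      // One split key '1' produces the two regions seen in the log: ['', '1') and ['1', '').
      admin.createTable(td.build(), new byte[][] { Bytes.toBytes("1") });
    }
  }
}
```

The synchronous createTable call only returns once the master's procedure finishes, which is why the client keeps polling "Checking to see if procedure is done pid=63" in the entries above until the completion message appears.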
2024-12-07T17:23:45,705 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:23:45,705 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportFileSystemState assigned. 2024-12-07T17:23:45,709 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-07T17:23:45,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733592225709 (current time:1733592225709). 2024-12-07T17:23:45,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T17:23:45,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-07T17:23:45,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T17:23:45,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x73b89b4e to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1fb5716b 2024-12-07T17:23:45,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b4cc7b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:23:45,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:23:45,718 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34152, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:23:45,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x73b89b4e to 127.0.0.1:58133 2024-12-07T17:23:45,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:23:45,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4f246122 to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@19874099 2024-12-07T17:23:45,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@543b457a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:23:45,726 DEBUG [hconnection-0x7df8185f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-12-07T17:23:45,727 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34160, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:23:45,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:23:45,730 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48822, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:23:45,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4f246122 to 127.0.0.1:58133 2024-12-07T17:23:45,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:23:45,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-07T17:23:45,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-07T17:23:45,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=68, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-07T17:23:45,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-07T17:23:45,734 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T17:23:45,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-07T17:23:45,735 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T17:23:45,737 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T17:23:45,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741964_1140 (size=170) 2024-12-07T17:23:45,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741964_1140 (size=170) 2024-12-07T17:23:45,753 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741964_1140 (size=170) 2024-12-07T17:23:45,755 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T17:23:45,755 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE; SnapshotRegionProcedure 67d849137cd823427d9f84b3054e3036}, {pid=70, ppid=68, state=RUNNABLE; SnapshotRegionProcedure 9ff8778d1820ef63e4a5355c41b0a4fa}] 2024-12-07T17:23:45,756 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE; SnapshotRegionProcedure 67d849137cd823427d9f84b3054e3036 2024-12-07T17:23:45,756 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=70, ppid=68, state=RUNNABLE; SnapshotRegionProcedure 9ff8778d1820ef63e4a5355c41b0a4fa 2024-12-07T17:23:45,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-07T17:23:45,907 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5c70517e8267,38423,1733592165361 2024-12-07T17:23:45,907 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5c70517e8267,36163,1733592165482 2024-12-07T17:23:45,908 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38423 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=70 2024-12-07T17:23:45,908 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36163 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=69 2024-12-07T17:23:45,908 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1733592225091.67d849137cd823427d9f84b3054e3036. 2024-12-07T17:23:45,909 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1733592225091.9ff8778d1820ef63e4a5355c41b0a4fa. 2024-12-07T17:23:45,909 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.HRegion(2538): Flush status journal for 67d849137cd823427d9f84b3054e3036: 2024-12-07T17:23:45,909 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for 9ff8778d1820ef63e4a5355c41b0a4fa: 2024-12-07T17:23:45,909 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1733592225091.67d849137cd823427d9f84b3054e3036. for emptySnaptb0-testExportFileSystemState completed. 
2024-12-07T17:23:45,909 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1733592225091.9ff8778d1820ef63e4a5355c41b0a4fa. for emptySnaptb0-testExportFileSystemState completed. 2024-12-07T17:23:45,909 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1733592225091.9ff8778d1820ef63e4a5355c41b0a4fa.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-07T17:23:45,909 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1733592225091.67d849137cd823427d9f84b3054e3036.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-07T17:23:45,909 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:23:45,909 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:23:45,909 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T17:23:45,909 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T17:23:45,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741965_1141 (size=71) 2024-12-07T17:23:45,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741965_1141 (size=71) 2024-12-07T17:23:45,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741965_1141 (size=71) 2024-12-07T17:23:45,923 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1733592225091.67d849137cd823427d9f84b3054e3036. 
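At this point the per-region SnapshotRegionProcedures for emptySnaptb0-testExportFileSystemState have stored region-info but add references for an empty list of hfiles, since nothing has been written to the table yet; the master consolidates and completes the snapshot a few entries further on (pid=68, state=SUCCESS). A small hedged helper like the one below could be used to confirm a completed snapshot by name from the client side; the class and method names are assumptions, and only the snapshot name comes from the log.

```java
import java.util.regex.Pattern;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;

final class SnapshotCheckSketch {
  // Illustrative helper (assumed, not part of the logged test): returns true once the
  // named snapshot appears in the master's list of completed snapshots.
  static boolean exists(Admin admin, String name) throws Exception {
    for (SnapshotDescription s : admin.listSnapshots(Pattern.compile(Pattern.quote(name)))) {
      if (name.equals(s.getName())) {
        return true;
      }
    }
    return false;
  }
}
```

For example, exists(admin, "emptySnaptb0-testExportFileSystemState") would be expected to return true once the "Finished pid=68, state=SUCCESS" entry below has been logged.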
2024-12-07T17:23:45,923 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=69 2024-12-07T17:23:45,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=69 2024-12-07T17:23:45,924 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 67d849137cd823427d9f84b3054e3036 2024-12-07T17:23:45,924 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE; SnapshotRegionProcedure 67d849137cd823427d9f84b3054e3036 2024-12-07T17:23:45,926 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, ppid=68, state=SUCCESS; SnapshotRegionProcedure 67d849137cd823427d9f84b3054e3036 in 170 msec 2024-12-07T17:23:45,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741966_1142 (size=71) 2024-12-07T17:23:45,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741966_1142 (size=71) 2024-12-07T17:23:45,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741966_1142 (size=71) 2024-12-07T17:23:45,931 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1733592225091.9ff8778d1820ef63e4a5355c41b0a4fa. 2024-12-07T17:23:45,931 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-12-07T17:23:45,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=70 2024-12-07T17:23:45,931 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 9ff8778d1820ef63e4a5355c41b0a4fa 2024-12-07T17:23:45,932 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=70, ppid=68, state=RUNNABLE; SnapshotRegionProcedure 9ff8778d1820ef63e4a5355c41b0a4fa 2024-12-07T17:23:45,937 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=68 2024-12-07T17:23:45,937 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T17:23:45,937 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=68, state=SUCCESS; SnapshotRegionProcedure 9ff8778d1820ef63e4a5355c41b0a4fa in 178 msec 2024-12-07T17:23:45,938 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState 
table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T17:23:45,939 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T17:23:45,939 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-12-07T17:23:45,940 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-12-07T17:23:45,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741967_1143 (size=552) 2024-12-07T17:23:45,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741967_1143 (size=552) 2024-12-07T17:23:45,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741967_1143 (size=552) 2024-12-07T17:23:45,956 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T17:23:45,961 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T17:23:45,961 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-12-07T17:23:45,963 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T17:23:45,963 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-07T17:23:45,964 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 231 msec 2024-12-07T17:23:46,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): 
Checking to see if procedure is done pid=68 2024-12-07T17:23:46,037 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState, procId: 68 completed 2024-12-07T17:23:46,046 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36163 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemState,,1733592225091.67d849137cd823427d9f84b3054e3036. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T17:23:46,048 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38423 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemState,1,1733592225091.9ff8778d1820ef63e4a5355c41b0a4fa. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T17:23:46,053 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportFileSystemState 2024-12-07T17:23:46,053 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportFileSystemState,,1733592225091.67d849137cd823427d9f84b3054e3036. 2024-12-07T17:23:46,053 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:23:46,066 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-07T17:23:46,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733592226066 (current time:1733592226066). 2024-12-07T17:23:46,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T17:23:46,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-07T17:23:46,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T17:23:46,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c87c129 to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1801c06 2024-12-07T17:23:46,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5abf3b46, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:23:46,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:23:46,074 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34164, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:23:46,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c87c129 to 
127.0.0.1:58133 2024-12-07T17:23:46,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:23:46,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x20c6c407 to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@21a79c35 2024-12-07T17:23:46,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e1e64c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:23:46,085 DEBUG [hconnection-0x76e0a459-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:23:46,086 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34168, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:23:46,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:23:46,089 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48830, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:23:46,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x20c6c407 to 127.0.0.1:58133 2024-12-07T17:23:46,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:23:46,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-07T17:23:46,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
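Just above, the test writes rows into both regions with the WAL disabled (hence the RegionServer's "Data may be lost in the event of a crash" warning) and then requests a second snapshot, snaptb0-testExportFileSystemState, again with type=FLUSH so memstores are flushed to store files before the manifest is written; the resulting flushes to cf hfiles appear in the following entries. A hedged sketch of the equivalent client calls is shown below; the durability setting, snapshot name, FLUSH type and the cf:q column come from the log, while the row key, value and class name are placeholders.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.SnapshotType;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

final class WriteAndSnapshotSketch {
  static void run(Connection conn) throws Exception {
    TableName tn = TableName.valueOf("default", "testtb-testExportFileSystemState");
    try (Table table = conn.getTable(tn); Admin admin = conn.getAdmin()) {
      // SKIP_WAL reproduces the "writing data ... with WAL disabled" warning above;
      // the row key and value here are placeholders.
      Put put = new Put(Bytes.toBytes("row-0"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);

      // type=FLUSH (as in the logged request) flushes memstores to hfiles first,
      // so the snapshot manifest references the newly flushed store files.
      admin.snapshot("snaptb0-testExportFileSystemState", tn, SnapshotType.FLUSH);
    }
  }
}
```

Admin.snapshot with SnapshotType.FLUSH blocks until the SnapshotProcedure finishes, which matches the client polling "Checking to see if procedure is done pid=71" seen in the surrounding entries.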
2024-12-07T17:23:46,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-07T17:23:46,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-07T17:23:46,093 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T17:23:46,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-07T17:23:46,094 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T17:23:46,096 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T17:23:46,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741968_1144 (size=165) 2024-12-07T17:23:46,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741968_1144 (size=165) 2024-12-07T17:23:46,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741968_1144 (size=165) 2024-12-07T17:23:46,110 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T17:23:46,111 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; SnapshotRegionProcedure 67d849137cd823427d9f84b3054e3036}, {pid=73, ppid=71, state=RUNNABLE; SnapshotRegionProcedure 9ff8778d1820ef63e4a5355c41b0a4fa}] 2024-12-07T17:23:46,111 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=72, ppid=71, state=RUNNABLE; SnapshotRegionProcedure 67d849137cd823427d9f84b3054e3036 2024-12-07T17:23:46,112 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=73, ppid=71, state=RUNNABLE; SnapshotRegionProcedure 9ff8778d1820ef63e4a5355c41b0a4fa 2024-12-07T17:23:46,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure 
is done pid=71 2024-12-07T17:23:46,263 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5c70517e8267,38423,1733592165361 2024-12-07T17:23:46,263 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5c70517e8267,36163,1733592165482 2024-12-07T17:23:46,264 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36163 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=72 2024-12-07T17:23:46,264 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38423 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=73 2024-12-07T17:23:46,264 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1733592225091.67d849137cd823427d9f84b3054e3036. 2024-12-07T17:23:46,264 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1733592225091.9ff8778d1820ef63e4a5355c41b0a4fa. 2024-12-07T17:23:46,264 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing 67d849137cd823427d9f84b3054e3036 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-07T17:23:46,264 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2837): Flushing 9ff8778d1820ef63e4a5355c41b0a4fa 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-07T17:23:46,283 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemState/9ff8778d1820ef63e4a5355c41b0a4fa/.tmp/cf/58a755d80c5d42a08b6d052e1cd922b6 is 71, key is 12234cc27c6f78a68bc57256b6483f04/cf:q/1733592226048/Put/seqid=0 2024-12-07T17:23:46,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741969_1145 (size=8256) 2024-12-07T17:23:46,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741969_1145 (size=8256) 2024-12-07T17:23:46,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741969_1145 (size=8256) 2024-12-07T17:23:46,289 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemState/67d849137cd823427d9f84b3054e3036/.tmp/cf/d6430b9d0a0c4113bac1366fd115897d is 71, key is 030242a7374173b871073a9c01389e6a/cf:q/1733592226045/Put/seqid=0 2024-12-07T17:23:46,290 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemState/9ff8778d1820ef63e4a5355c41b0a4fa/.tmp/cf/58a755d80c5d42a08b6d052e1cd922b6 2024-12-07T17:23:46,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741970_1146 (size=5356) 2024-12-07T17:23:46,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741970_1146 (size=5356) 2024-12-07T17:23:46,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741970_1146 (size=5356) 2024-12-07T17:23:46,297 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemState/67d849137cd823427d9f84b3054e3036/.tmp/cf/d6430b9d0a0c4113bac1366fd115897d 2024-12-07T17:23:46,297 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemState/9ff8778d1820ef63e4a5355c41b0a4fa/.tmp/cf/58a755d80c5d42a08b6d052e1cd922b6 as hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemState/9ff8778d1820ef63e4a5355c41b0a4fa/cf/58a755d80c5d42a08b6d052e1cd922b6 2024-12-07T17:23:46,303 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemState/9ff8778d1820ef63e4a5355c41b0a4fa/cf/58a755d80c5d42a08b6d052e1cd922b6, entries=46, sequenceid=6, filesize=8.1 K 2024-12-07T17:23:46,303 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemState/67d849137cd823427d9f84b3054e3036/.tmp/cf/d6430b9d0a0c4113bac1366fd115897d as hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemState/67d849137cd823427d9f84b3054e3036/cf/d6430b9d0a0c4113bac1366fd115897d 2024-12-07T17:23:46,304 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(3040): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 9ff8778d1820ef63e4a5355c41b0a4fa in 40ms, sequenceid=6, compaction requested=false 2024-12-07T17:23:46,304 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-12-07T17:23:46,305 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2538): Flush status journal for 9ff8778d1820ef63e4a5355c41b0a4fa: 2024-12-07T17:23:46,305 
DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1733592225091.9ff8778d1820ef63e4a5355c41b0a4fa. for snaptb0-testExportFileSystemState completed. 2024-12-07T17:23:46,305 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1733592225091.9ff8778d1820ef63e4a5355c41b0a4fa.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-07T17:23:46,305 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:23:46,305 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemState/9ff8778d1820ef63e4a5355c41b0a4fa/cf/58a755d80c5d42a08b6d052e1cd922b6] hfiles 2024-12-07T17:23:46,305 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemState/9ff8778d1820ef63e4a5355c41b0a4fa/cf/58a755d80c5d42a08b6d052e1cd922b6 for snapshot=snaptb0-testExportFileSystemState 2024-12-07T17:23:46,316 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemState/67d849137cd823427d9f84b3054e3036/cf/d6430b9d0a0c4113bac1366fd115897d, entries=4, sequenceid=6, filesize=5.2 K 2024-12-07T17:23:46,317 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 67d849137cd823427d9f84b3054e3036 in 53ms, sequenceid=6, compaction requested=false 2024-12-07T17:23:46,317 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for 67d849137cd823427d9f84b3054e3036: 2024-12-07T17:23:46,317 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1733592225091.67d849137cd823427d9f84b3054e3036. for snaptb0-testExportFileSystemState completed. 2024-12-07T17:23:46,317 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1733592225091.67d849137cd823427d9f84b3054e3036.' 
region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-07T17:23:46,318 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:23:46,318 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemState/67d849137cd823427d9f84b3054e3036/cf/d6430b9d0a0c4113bac1366fd115897d] hfiles 2024-12-07T17:23:46,318 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemState/67d849137cd823427d9f84b3054e3036/cf/d6430b9d0a0c4113bac1366fd115897d for snapshot=snaptb0-testExportFileSystemState 2024-12-07T17:23:46,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741971_1147 (size=110) 2024-12-07T17:23:46,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741971_1147 (size=110) 2024-12-07T17:23:46,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741971_1147 (size=110) 2024-12-07T17:23:46,319 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1733592225091.9ff8778d1820ef63e4a5355c41b0a4fa. 
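The entries above trace the master-side SnapshotProcedure (pid=71) and its per-region SnapshotRegionProcedure children (pid=72, pid=73): each region is flushed, the new hfile is committed, and a reference to it is added to the snapshot manifest. For orientation, a minimal client-side sketch of what starts this sequence is shown below; the snapshot and table names are taken from the log, while the driver class and connection settings are assumptions.

    // Illustrative sketch only: requests an online (FLUSH) snapshot like the
    // one recorded above. For an enabled table, Admin.snapshot() drives the
    // SnapshotProcedure / SnapshotRegionProcedure chain seen in this log.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Blocks until the snapshot procedure completes on the master.
          admin.snapshot("snaptb0-testExportFileSystemState",
              TableName.valueOf("testtb-testExportFileSystemState"));
        }
      }
    }
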
2024-12-07T17:23:46,320 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=73 2024-12-07T17:23:46,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=73 2024-12-07T17:23:46,324 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 9ff8778d1820ef63e4a5355c41b0a4fa 2024-12-07T17:23:46,324 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=73, ppid=71, state=RUNNABLE; SnapshotRegionProcedure 9ff8778d1820ef63e4a5355c41b0a4fa 2024-12-07T17:23:46,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741972_1148 (size=110) 2024-12-07T17:23:46,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741972_1148 (size=110) 2024-12-07T17:23:46,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741972_1148 (size=110) 2024-12-07T17:23:46,328 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, ppid=71, state=SUCCESS; SnapshotRegionProcedure 9ff8778d1820ef63e4a5355c41b0a4fa in 215 msec 2024-12-07T17:23:46,328 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1733592225091.67d849137cd823427d9f84b3054e3036. 2024-12-07T17:23:46,328 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-07T17:23:46,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-12-07T17:23:46,329 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 67d849137cd823427d9f84b3054e3036 2024-12-07T17:23:46,329 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=72, ppid=71, state=RUNNABLE; SnapshotRegionProcedure 67d849137cd823427d9f84b3054e3036 2024-12-07T17:23:46,331 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-12-07T17:23:46,331 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; SnapshotRegionProcedure 67d849137cd823427d9f84b3054e3036 in 219 msec 2024-12-07T17:23:46,331 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T17:23:46,332 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH 
ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T17:23:46,332 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T17:23:46,332 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-12-07T17:23:46,333 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-07T17:23:46,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741973_1149 (size=630) 2024-12-07T17:23:46,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741973_1149 (size=630) 2024-12-07T17:23:46,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741973_1149 (size=630) 2024-12-07T17:23:46,354 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T17:23:46,360 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T17:23:46,360 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-07T17:23:46,362 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T17:23:46,362 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-07T17:23:46,363 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 271 msec 2024-12-07T17:23:46,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-07T17:23:46,395 INFO [Time-limited test {}] 
client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState, procId: 71 completed 2024-12-07T17:23:46,396 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592226396 2024-12-07T17:23:46,396 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:33765, tgtDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592226396, rawTgtDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592226396, srcFsUri=hdfs://localhost:33765, srcDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:23:46,445 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:33765, inputRoot=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:23:46,446 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1614647307_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592226396, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592226396/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-07T17:23:46,448 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-07T17:23:46,453 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592226396/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-07T17:23:46,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741974_1150 (size=165) 2024-12-07T17:23:46,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741974_1150 (size=165) 2024-12-07T17:23:46,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741974_1150 (size=165) 2024-12-07T17:23:46,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741975_1151 (size=630) 2024-12-07T17:23:46,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741975_1151 (size=630) 2024-12-07T17:23:46,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741975_1151 (size=630) 2024-12-07T17:23:46,491 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-07T17:23:46,492 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): 
For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-07T17:23:46,492 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-07T17:23:46,492 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-07T17:23:47,629 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop-12902196802621330603.jar 2024-12-07T17:23:47,630 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-07T17:23:47,630 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-07T17:23:47,706 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop-10683121548202117369.jar 2024-12-07T17:23:47,706 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-07T17:23:47,707 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-07T17:23:47,707 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-07T17:23:47,707 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-07T17:23:47,708 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-07T17:23:47,708 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-07T17:23:47,708 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-07T17:23:47,709 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-07T17:23:47,709 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-07T17:23:47,709 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-07T17:23:47,710 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-07T17:23:47,710 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-07T17:23:47,710 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-07T17:23:47,710 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-07T17:23:47,711 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-07T17:23:47,711 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-07T17:23:47,711 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 
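The TableMapReduceUtil entries above and below record dependency jars being resolved for the MapReduce job that ExportSnapshot submits to copy the snapshot's hfiles to the destination filesystem. A hedged sketch of driving the same export programmatically is below, assuming the tool can be run through Hadoop's ToolRunner; the -snapshot and -copy-to switches are the tool's documented options, the destination URI is copied from the log, and the driver class itself is hypothetical.

    // Illustrative sketch only: exports the snapshot taken above to the
    // target directory recorded in the log. ExportSnapshot internally sets up
    // the MapReduce job whose dependency-jar resolution appears in this log.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotDriver {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemState",
            "-copy-to",
            "hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592226396"
        });
        System.exit(rc);
      }
    }
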
2024-12-07T17:23:47,711 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-07T17:23:47,712 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:23:47,712 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:23:47,712 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T17:23:47,713 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:23:47,713 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:23:47,713 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T17:23:47,713 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T17:23:47,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741976_1152 (size=451756) 2024-12-07T17:23:47,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741976_1152 (size=451756) 2024-12-07T17:23:47,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741976_1152 (size=451756) 2024-12-07T17:23:47,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741977_1153 (size=127628) 2024-12-07T17:23:47,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741977_1153 (size=127628) 2024-12-07T17:23:47,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741977_1153 (size=127628) 2024-12-07T17:23:47,866 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741978_1154 (size=2172101) 2024-12-07T17:23:47,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741978_1154 (size=2172101) 2024-12-07T17:23:47,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741978_1154 (size=2172101) 2024-12-07T17:23:47,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741979_1155 (size=213228) 2024-12-07T17:23:47,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741979_1155 (size=213228) 2024-12-07T17:23:47,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741979_1155 (size=213228) 2024-12-07T17:23:47,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741980_1156 (size=1877034) 2024-12-07T17:23:47,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741980_1156 (size=1877034) 2024-12-07T17:23:47,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741980_1156 (size=1877034) 2024-12-07T17:23:47,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741981_1157 (size=533455) 2024-12-07T17:23:47,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741981_1157 (size=533455) 2024-12-07T17:23:47,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741981_1157 (size=533455) 2024-12-07T17:23:47,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741982_1158 (size=7280644) 2024-12-07T17:23:47,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741982_1158 (size=7280644) 2024-12-07T17:23:47,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741982_1158 (size=7280644) 2024-12-07T17:23:48,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741983_1159 (size=4188619) 2024-12-07T17:23:48,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741983_1159 (size=4188619) 2024-12-07T17:23:48,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741983_1159 (size=4188619) 2024-12-07T17:23:48,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741984_1160 (size=20406) 2024-12-07T17:23:48,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741984_1160 (size=20406) 2024-12-07T17:23:48,054 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741984_1160 (size=20406) 2024-12-07T17:23:48,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741985_1161 (size=75495) 2024-12-07T17:23:48,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741985_1161 (size=75495) 2024-12-07T17:23:48,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741985_1161 (size=75495) 2024-12-07T17:23:48,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741986_1162 (size=6350154) 2024-12-07T17:23:48,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741986_1162 (size=6350154) 2024-12-07T17:23:48,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741986_1162 (size=6350154) 2024-12-07T17:23:48,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741987_1163 (size=45609) 2024-12-07T17:23:48,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741987_1163 (size=45609) 2024-12-07T17:23:48,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741987_1163 (size=45609) 2024-12-07T17:23:48,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741988_1164 (size=110084) 2024-12-07T17:23:48,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741988_1164 (size=110084) 2024-12-07T17:23:48,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741988_1164 (size=110084) 2024-12-07T17:23:48,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741989_1165 (size=1323991) 2024-12-07T17:23:48,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741989_1165 (size=1323991) 2024-12-07T17:23:48,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741989_1165 (size=1323991) 2024-12-07T17:23:48,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741990_1166 (size=23076) 2024-12-07T17:23:48,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741990_1166 (size=23076) 2024-12-07T17:23:48,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741990_1166 (size=23076) 2024-12-07T17:23:48,177 DEBUG [master/5c70517e8267:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 27a2410ab2d9b432daa26ec0ea504b68 changed from -1.0 to 0.0, refreshing cache 
2024-12-07T17:23:48,178 DEBUG [master/5c70517e8267:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 6860b2d9133d857f1acf6c9fb2cd82f1 changed from -1.0 to 0.0, refreshing cache 2024-12-07T17:23:48,178 DEBUG [master/5c70517e8267:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 67d849137cd823427d9f84b3054e3036 changed from -1.0 to 0.0, refreshing cache 2024-12-07T17:23:48,178 DEBUG [master/5c70517e8267:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 9ff8778d1820ef63e4a5355c41b0a4fa changed from -1.0 to 0.0, refreshing cache 2024-12-07T17:23:48,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741991_1167 (size=126803) 2024-12-07T17:23:48,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741991_1167 (size=126803) 2024-12-07T17:23:48,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741991_1167 (size=126803) 2024-12-07T17:23:48,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741992_1168 (size=322274) 2024-12-07T17:23:48,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741992_1168 (size=322274) 2024-12-07T17:23:48,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741992_1168 (size=322274) 2024-12-07T17:23:48,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741993_1169 (size=1832290) 2024-12-07T17:23:48,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741993_1169 (size=1832290) 2024-12-07T17:23:48,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741993_1169 (size=1832290) 2024-12-07T17:23:48,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741994_1170 (size=30081) 2024-12-07T17:23:48,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741994_1170 (size=30081) 2024-12-07T17:23:48,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741994_1170 (size=30081) 2024-12-07T17:23:48,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741995_1171 (size=53616) 2024-12-07T17:23:48,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741995_1171 (size=53616) 2024-12-07T17:23:48,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741995_1171 (size=53616) 2024-12-07T17:23:48,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741996_1172 (size=29229) 2024-12-07T17:23:48,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:33595 is added to blk_1073741996_1172 (size=29229) 2024-12-07T17:23:48,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741996_1172 (size=29229) 2024-12-07T17:23:48,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741997_1173 (size=169089) 2024-12-07T17:23:48,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741997_1173 (size=169089) 2024-12-07T17:23:48,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741997_1173 (size=169089) 2024-12-07T17:23:48,702 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0002_000001 (auth:SIMPLE) from 127.0.0.1:34672 2024-12-07T17:23:48,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741998_1174 (size=5175431) 2024-12-07T17:23:48,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741998_1174 (size=5175431) 2024-12-07T17:23:48,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741998_1174 (size=5175431) 2024-12-07T17:23:48,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741999_1175 (size=136454) 2024-12-07T17:23:48,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741999_1175 (size=136454) 2024-12-07T17:23:48,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741999_1175 (size=136454) 2024-12-07T17:23:48,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742000_1176 (size=907853) 2024-12-07T17:23:48,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742000_1176 (size=907853) 2024-12-07T17:23:48,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742000_1176 (size=907853) 2024-12-07T17:23:48,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742001_1177 (size=3317408) 2024-12-07T17:23:48,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742001_1177 (size=3317408) 2024-12-07T17:23:48,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742001_1177 (size=3317408) 2024-12-07T17:23:48,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742002_1178 (size=503880) 2024-12-07T17:23:48,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742002_1178 (size=503880) 2024-12-07T17:23:48,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:33595 is added to blk_1073742002_1178 (size=503880) 2024-12-07T17:23:48,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742003_1179 (size=4695811) 2024-12-07T17:23:48,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742003_1179 (size=4695811) 2024-12-07T17:23:48,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742003_1179 (size=4695811) 2024-12-07T17:23:48,866 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-07T17:23:48,870 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-12-07T17:23:48,873 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-07T17:23:48,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742004_1180 (size=344) 2024-12-07T17:23:48,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742004_1180 (size=344) 2024-12-07T17:23:48,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742004_1180 (size=344) 2024-12-07T17:23:48,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742005_1181 (size=15) 2024-12-07T17:23:48,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742005_1181 (size=15) 2024-12-07T17:23:48,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742005_1181 (size=15) 2024-12-07T17:23:48,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742006_1182 (size=304893) 2024-12-07T17:23:48,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742006_1182 (size=304893) 2024-12-07T17:23:48,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742006_1182 (size=304893) 2024-12-07T17:23:48,975 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-07T17:23:48,975 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-07T17:23:49,631 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0003_000001 (auth:SIMPLE) from 127.0.0.1:43050 2024-12-07T17:23:49,899 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T17:23:53,812 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-1_3/usercache/jenkins/appcache/application_1733592173099_0002/container_1733592173099_0002_01_000001/launch_container.sh] 2024-12-07T17:23:53,812 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-1_3/usercache/jenkins/appcache/application_1733592173099_0002/container_1733592173099_0002_01_000001/container_tokens] 2024-12-07T17:23:53,812 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-1_3/usercache/jenkins/appcache/application_1733592173099_0002/container_1733592173099_0002_01_000001/sysfs] 2024-12-07T17:23:54,860 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-07T17:23:54,860 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-07T17:23:56,183 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0003_000001 (auth:SIMPLE) from 127.0.0.1:36358 2024-12-07T17:23:56,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742007_1183 (size=350567) 2024-12-07T17:23:56,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742007_1183 (size=350567) 2024-12-07T17:23:56,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742007_1183 (size=350567) 2024-12-07T17:23:58,517 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0003_000001 (auth:SIMPLE) from 127.0.0.1:35952 2024-12-07T17:24:02,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742008_1184 (size=8256) 2024-12-07T17:24:02,688 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742008_1184 (size=8256) 2024-12-07T17:24:02,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742008_1184 (size=8256) 2024-12-07T17:24:02,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742009_1185 (size=5356) 2024-12-07T17:24:02,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742009_1185 (size=5356) 2024-12-07T17:24:02,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742009_1185 (size=5356) 2024-12-07T17:24:02,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742010_1186 (size=17422) 2024-12-07T17:24:02,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742010_1186 (size=17422) 2024-12-07T17:24:02,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742010_1186 (size=17422) 2024-12-07T17:24:02,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742011_1187 (size=465) 2024-12-07T17:24:02,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742011_1187 (size=465) 2024-12-07T17:24:02,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742011_1187 (size=465) 2024-12-07T17:24:02,895 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_3/usercache/jenkins/appcache/application_1733592173099_0003/container_1733592173099_0003_01_000002/launch_container.sh] 2024-12-07T17:24:02,895 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_3/usercache/jenkins/appcache/application_1733592173099_0003/container_1733592173099_0003_01_000002/container_tokens] 2024-12-07T17:24:02,895 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_3/usercache/jenkins/appcache/application_1733592173099_0003/container_1733592173099_0003_01_000002/sysfs] 2024-12-07T17:24:02,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to 
blk_1073742012_1188 (size=17422) 2024-12-07T17:24:02,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742012_1188 (size=17422) 2024-12-07T17:24:02,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742012_1188 (size=17422) 2024-12-07T17:24:02,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742013_1189 (size=350567) 2024-12-07T17:24:02,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742013_1189 (size=350567) 2024-12-07T17:24:02,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742013_1189 (size=350567) 2024-12-07T17:24:02,981 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0003_000001 (auth:SIMPLE) from 127.0.0.1:35704 2024-12-07T17:24:04,157 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-07T17:24:04,159 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-07T17:24:04,179 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportFileSystemState 2024-12-07T17:24:04,180 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-07T17:24:04,180 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-07T17:24:04,181 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1614647307_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-07T17:24:04,181 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-07T17:24:04,181 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-07T17:24:04,181 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1614647307_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592226396/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592226396/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-07T17:24:04,182 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592226396/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-07T17:24:04,182 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): 
hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592226396/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-07T17:24:04,194 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemState 2024-12-07T17:24:04,194 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemState 2024-12-07T17:24:04,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=74, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemState 2024-12-07T17:24:04,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74 2024-12-07T17:24:04,198 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592244198"}]},"ts":"1733592244198"} 2024-12-07T17:24:04,200 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta 2024-12-07T17:24:04,202 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-12-07T17:24:04,203 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=75, ppid=74, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-12-07T17:24:04,205 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=67d849137cd823427d9f84b3054e3036, UNASSIGN}, {pid=77, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=9ff8778d1820ef63e4a5355c41b0a4fa, UNASSIGN}] 2024-12-07T17:24:04,206 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=77, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=9ff8778d1820ef63e4a5355c41b0a4fa, UNASSIGN 2024-12-07T17:24:04,206 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=76, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=67d849137cd823427d9f84b3054e3036, UNASSIGN 2024-12-07T17:24:04,206 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=77 updating hbase:meta row=9ff8778d1820ef63e4a5355c41b0a4fa, regionState=CLOSING, regionLocation=5c70517e8267,38423,1733592165361 2024-12-07T17:24:04,206 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=76 updating hbase:meta row=67d849137cd823427d9f84b3054e3036, regionState=CLOSING, regionLocation=5c70517e8267,36163,1733592165482 2024-12-07T17:24:04,208 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T17:24:04,208 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; CloseRegionProcedure 9ff8778d1820ef63e4a5355c41b0a4fa, 
server=5c70517e8267,38423,1733592165361}] 2024-12-07T17:24:04,209 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T17:24:04,209 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=79, ppid=76, state=RUNNABLE; CloseRegionProcedure 67d849137cd823427d9f84b3054e3036, server=5c70517e8267,36163,1733592165482}] 2024-12-07T17:24:04,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74 2024-12-07T17:24:04,361 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5c70517e8267,38423,1733592165361 2024-12-07T17:24:04,361 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] handler.UnassignRegionHandler(124): Close 9ff8778d1820ef63e4a5355c41b0a4fa 2024-12-07T17:24:04,362 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5c70517e8267,36163,1733592165482 2024-12-07T17:24:04,362 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T17:24:04,362 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1681): Closing 9ff8778d1820ef63e4a5355c41b0a4fa, disabling compactions & flushes 2024-12-07T17:24:04,362 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,1,1733592225091.9ff8778d1820ef63e4a5355c41b0a4fa. 2024-12-07T17:24:04,362 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,1,1733592225091.9ff8778d1820ef63e4a5355c41b0a4fa. 2024-12-07T17:24:04,362 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,1,1733592225091.9ff8778d1820ef63e4a5355c41b0a4fa. after waiting 0 ms 2024-12-07T17:24:04,362 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] handler.UnassignRegionHandler(124): Close 67d849137cd823427d9f84b3054e3036 2024-12-07T17:24:04,362 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,1,1733592225091.9ff8778d1820ef63e4a5355c41b0a4fa. 2024-12-07T17:24:04,362 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T17:24:04,362 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1681): Closing 67d849137cd823427d9f84b3054e3036, disabling compactions & flushes 2024-12-07T17:24:04,362 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,,1733592225091.67d849137cd823427d9f84b3054e3036. 
2024-12-07T17:24:04,362 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,,1733592225091.67d849137cd823427d9f84b3054e3036. 2024-12-07T17:24:04,362 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,,1733592225091.67d849137cd823427d9f84b3054e3036. after waiting 0 ms 2024-12-07T17:24:04,362 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,,1733592225091.67d849137cd823427d9f84b3054e3036. 2024-12-07T17:24:04,384 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemState/9ff8778d1820ef63e4a5355c41b0a4fa/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T17:24:04,385 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:24:04,385 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,1,1733592225091.9ff8778d1820ef63e4a5355c41b0a4fa. 2024-12-07T17:24:04,385 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1635): Region close journal for 9ff8778d1820ef63e4a5355c41b0a4fa: 2024-12-07T17:24:04,388 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] handler.UnassignRegionHandler(170): Closed 9ff8778d1820ef63e4a5355c41b0a4fa 2024-12-07T17:24:04,388 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=77 updating hbase:meta row=9ff8778d1820ef63e4a5355c41b0a4fa, regionState=CLOSED 2024-12-07T17:24:04,395 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-12-07T17:24:04,395 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; CloseRegionProcedure 9ff8778d1820ef63e4a5355c41b0a4fa, server=5c70517e8267,38423,1733592165361 in 185 msec 2024-12-07T17:24:04,396 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, ppid=75, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=9ff8778d1820ef63e4a5355c41b0a4fa, UNASSIGN in 190 msec 2024-12-07T17:24:04,419 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemState/67d849137cd823427d9f84b3054e3036/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T17:24:04,425 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:24:04,425 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1922): Closed 
testtb-testExportFileSystemState,,1733592225091.67d849137cd823427d9f84b3054e3036. 2024-12-07T17:24:04,425 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1635): Region close journal for 67d849137cd823427d9f84b3054e3036: 2024-12-07T17:24:04,427 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] handler.UnassignRegionHandler(170): Closed 67d849137cd823427d9f84b3054e3036 2024-12-07T17:24:04,428 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=76 updating hbase:meta row=67d849137cd823427d9f84b3054e3036, regionState=CLOSED 2024-12-07T17:24:04,432 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=79, resume processing ppid=76 2024-12-07T17:24:04,432 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, ppid=76, state=SUCCESS; CloseRegionProcedure 67d849137cd823427d9f84b3054e3036, server=5c70517e8267,36163,1733592165482 in 221 msec 2024-12-07T17:24:04,434 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-12-07T17:24:04,434 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=67d849137cd823427d9f84b3054e3036, UNASSIGN in 227 msec 2024-12-07T17:24:04,437 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=75, resume processing ppid=74 2024-12-07T17:24:04,437 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, ppid=74, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 232 msec 2024-12-07T17:24:04,439 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592244439"}]},"ts":"1733592244439"} 2024-12-07T17:24:04,441 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-12-07T17:24:04,447 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to state=DISABLED 2024-12-07T17:24:04,450 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemState in 254 msec 2024-12-07T17:24:04,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74 2024-12-07T17:24:04,500 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState, procId: 74 completed 2024-12-07T17:24:04,501 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemState 2024-12-07T17:24:04,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=80, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-07T17:24:04,504 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=80, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-07T17:24:04,504 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemState 2024-12-07T17:24:04,505 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=80, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-07T17:24:04,507 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36163 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemState 2024-12-07T17:24:04,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-07T17:24:04,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-07T17:24:04,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-07T17:24:04,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-07T17:24:04,517 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-07T17:24:04,517 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-07T17:24:04,517 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-07T17:24:04,517 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-07T17:24:04,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-07T17:24:04,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-07T17:24:04,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-07T17:24:04,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/acl 2024-12-07T17:24:04,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:24:04,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:24:04,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-07T17:24:04,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:24:04,521 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemState/9ff8778d1820ef63e4a5355c41b0a4fa 2024-12-07T17:24:04,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=80 2024-12-07T17:24:04,524 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemState/67d849137cd823427d9f84b3054e3036 2024-12-07T17:24:04,527 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemState/9ff8778d1820ef63e4a5355c41b0a4fa/cf, FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemState/9ff8778d1820ef63e4a5355c41b0a4fa/recovered.edits] 2024-12-07T17:24:04,531 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemState/9ff8778d1820ef63e4a5355c41b0a4fa/cf/58a755d80c5d42a08b6d052e1cd922b6 to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportFileSystemState/9ff8778d1820ef63e4a5355c41b0a4fa/cf/58a755d80c5d42a08b6d052e1cd922b6 2024-12-07T17:24:04,534 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemState/67d849137cd823427d9f84b3054e3036/cf, FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemState/67d849137cd823427d9f84b3054e3036/recovered.edits] 2024-12-07T17:24:04,538 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemState/9ff8778d1820ef63e4a5355c41b0a4fa/recovered.edits/9.seqid to 
hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportFileSystemState/9ff8778d1820ef63e4a5355c41b0a4fa/recovered.edits/9.seqid 2024-12-07T17:24:04,538 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemState/67d849137cd823427d9f84b3054e3036/cf/d6430b9d0a0c4113bac1366fd115897d to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportFileSystemState/67d849137cd823427d9f84b3054e3036/cf/d6430b9d0a0c4113bac1366fd115897d 2024-12-07T17:24:04,539 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemState/9ff8778d1820ef63e4a5355c41b0a4fa 2024-12-07T17:24:04,543 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemState/67d849137cd823427d9f84b3054e3036/recovered.edits/9.seqid to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportFileSystemState/67d849137cd823427d9f84b3054e3036/recovered.edits/9.seqid 2024-12-07T17:24:04,544 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemState/67d849137cd823427d9f84b3054e3036 2024-12-07T17:24:04,544 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-12-07T17:24:04,547 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=80, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-07T17:24:04,554 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-12-07T17:24:04,558 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemState' descriptor. 2024-12-07T17:24:04,559 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=80, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-07T17:24:04,559 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemState' from region states. 
2024-12-07T17:24:04,559 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1733592225091.67d849137cd823427d9f84b3054e3036.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733592244559"}]},"ts":"9223372036854775807"} 2024-12-07T17:24:04,559 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1733592225091.9ff8778d1820ef63e4a5355c41b0a4fa.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733592244559"}]},"ts":"9223372036854775807"} 2024-12-07T17:24:04,562 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-07T17:24:04,562 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 67d849137cd823427d9f84b3054e3036, NAME => 'testtb-testExportFileSystemState,,1733592225091.67d849137cd823427d9f84b3054e3036.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 9ff8778d1820ef63e4a5355c41b0a4fa, NAME => 'testtb-testExportFileSystemState,1,1733592225091.9ff8778d1820ef63e4a5355c41b0a4fa.', STARTKEY => '1', ENDKEY => ''}] 2024-12-07T17:24:04,562 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemState' as deleted. 2024-12-07T17:24:04,562 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733592244562"}]},"ts":"9223372036854775807"} 2024-12-07T17:24:04,564 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemState state from META 2024-12-07T17:24:04,566 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=80, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-07T17:24:04,567 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemState in 65 msec 2024-12-07T17:24:04,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=80 2024-12-07T17:24:04,623 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState, procId: 80 completed 2024-12-07T17:24:04,631 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemState" 2024-12-07T17:24:04,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-12-07T17:24:04,636 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemState" 2024-12-07T17:24:04,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemState 2024-12-07T17:24:04,667 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=781 (was 783), OpenFileDescriptor=787 (was 791), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=599 (was 584) - SystemLoadAverage LEAK? -, ProcessCount=20 (was 17) - ProcessCount LEAK? 
-, AvailableMemoryMB=4972 (was 5348) 2024-12-07T17:24:04,667 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=781 is superior to 500 2024-12-07T17:24:04,699 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=781, OpenFileDescriptor=787, MaxFileDescriptor=1048576, SystemLoadAverage=599, ProcessCount=20, AvailableMemoryMB=4969 2024-12-07T17:24:04,699 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=781 is superior to 500 2024-12-07T17:24:04,702 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T17:24:04,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testConsecutiveExports 2024-12-07T17:24:04,705 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T17:24:04,705 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:24:04,706 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T17:24:04,706 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testConsecutiveExports" procId is: 81 2024-12-07T17:24:04,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-07T17:24:04,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742014_1190 (size=404) 2024-12-07T17:24:04,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742014_1190 (size=404) 2024-12-07T17:24:04,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742014_1190 (size=404) 2024-12-07T17:24:04,759 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => c82b210e70c278e3baa9b29e85c306d7, NAME => 'testtb-testConsecutiveExports,1,1733592244702.c82b210e70c278e3baa9b29e85c306d7.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL 
=> 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:24:04,758 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 5f343c64200d857de0105da01dc231b3, NAME => 'testtb-testConsecutiveExports,,1733592244702.5f343c64200d857de0105da01dc231b3.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:24:04,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742015_1191 (size=65) 2024-12-07T17:24:04,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742015_1191 (size=65) 2024-12-07T17:24:04,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742015_1191 (size=65) 2024-12-07T17:24:04,788 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,1,1733592244702.c82b210e70c278e3baa9b29e85c306d7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:24:04,788 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1681): Closing c82b210e70c278e3baa9b29e85c306d7, disabling compactions & flushes 2024-12-07T17:24:04,788 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,1,1733592244702.c82b210e70c278e3baa9b29e85c306d7. 2024-12-07T17:24:04,788 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,1,1733592244702.c82b210e70c278e3baa9b29e85c306d7. 2024-12-07T17:24:04,788 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,1,1733592244702.c82b210e70c278e3baa9b29e85c306d7. after waiting 0 ms 2024-12-07T17:24:04,788 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,1,1733592244702.c82b210e70c278e3baa9b29e85c306d7. 2024-12-07T17:24:04,789 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,1,1733592244702.c82b210e70c278e3baa9b29e85c306d7. 
2024-12-07T17:24:04,789 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1635): Region close journal for c82b210e70c278e3baa9b29e85c306d7: 2024-12-07T17:24:04,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742016_1192 (size=65) 2024-12-07T17:24:04,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742016_1192 (size=65) 2024-12-07T17:24:04,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742016_1192 (size=65) 2024-12-07T17:24:04,797 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,,1733592244702.5f343c64200d857de0105da01dc231b3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:24:04,797 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1681): Closing 5f343c64200d857de0105da01dc231b3, disabling compactions & flushes 2024-12-07T17:24:04,797 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,,1733592244702.5f343c64200d857de0105da01dc231b3. 2024-12-07T17:24:04,797 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,,1733592244702.5f343c64200d857de0105da01dc231b3. 2024-12-07T17:24:04,797 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,,1733592244702.5f343c64200d857de0105da01dc231b3. after waiting 0 ms 2024-12-07T17:24:04,797 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,,1733592244702.5f343c64200d857de0105da01dc231b3. 2024-12-07T17:24:04,797 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,,1733592244702.5f343c64200d857de0105da01dc231b3. 
2024-12-07T17:24:04,797 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1635): Region close journal for 5f343c64200d857de0105da01dc231b3: 2024-12-07T17:24:04,799 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T17:24:04,799 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1733592244702.c82b210e70c278e3baa9b29e85c306d7.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733592244799"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733592244799"}]},"ts":"1733592244799"} 2024-12-07T17:24:04,799 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1733592244702.5f343c64200d857de0105da01dc231b3.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733592244799"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733592244799"}]},"ts":"1733592244799"} 2024-12-07T17:24:04,802 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-07T17:24:04,803 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T17:24:04,803 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592244803"}]},"ts":"1733592244803"} 2024-12-07T17:24:04,809 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-12-07T17:24:04,813 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(202): Hosts are {5c70517e8267=0} racks are {/default-rack=0} 2024-12-07T17:24:04,815 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-07T17:24:04,815 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-07T17:24:04,815 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-07T17:24:04,815 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-07T17:24:04,815 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-07T17:24:04,815 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-07T17:24:04,815 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T17:24:04,815 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=5f343c64200d857de0105da01dc231b3, ASSIGN}, {pid=83, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=c82b210e70c278e3baa9b29e85c306d7, ASSIGN}] 2024-12-07T17:24:04,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-07T17:24:04,817 INFO [PEWorker-1 {}] 
procedure.MasterProcedureScheduler(786): Took xlock for pid=83, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=c82b210e70c278e3baa9b29e85c306d7, ASSIGN 2024-12-07T17:24:04,817 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=82, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=5f343c64200d857de0105da01dc231b3, ASSIGN 2024-12-07T17:24:04,818 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=83, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=c82b210e70c278e3baa9b29e85c306d7, ASSIGN; state=OFFLINE, location=5c70517e8267,38423,1733592165361; forceNewPlan=false, retain=false 2024-12-07T17:24:04,818 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=82, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=5f343c64200d857de0105da01dc231b3, ASSIGN; state=OFFLINE, location=5c70517e8267,41313,1733592165267; forceNewPlan=false, retain=false 2024-12-07T17:24:04,860 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-07T17:24:04,968 INFO [5c70517e8267:36895 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-07T17:24:04,969 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=82 updating hbase:meta row=5f343c64200d857de0105da01dc231b3, regionState=OPENING, regionLocation=5c70517e8267,41313,1733592165267 2024-12-07T17:24:04,969 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=83 updating hbase:meta row=c82b210e70c278e3baa9b29e85c306d7, regionState=OPENING, regionLocation=5c70517e8267,38423,1733592165361 2024-12-07T17:24:04,971 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; OpenRegionProcedure c82b210e70c278e3baa9b29e85c306d7, server=5c70517e8267,38423,1733592165361}] 2024-12-07T17:24:04,972 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=85, ppid=82, state=RUNNABLE; OpenRegionProcedure 5f343c64200d857de0105da01dc231b3, server=5c70517e8267,41313,1733592165267}] 2024-12-07T17:24:05,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-07T17:24:05,124 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5c70517e8267,38423,1733592165361 2024-12-07T17:24:05,125 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5c70517e8267,41313,1733592165267 2024-12-07T17:24:05,129 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] handler.AssignRegionHandler(135): Open testtb-testConsecutiveExports,1,1733592244702.c82b210e70c278e3baa9b29e85c306d7. 
2024-12-07T17:24:05,129 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7285): Opening region: {ENCODED => c82b210e70c278e3baa9b29e85c306d7, NAME => 'testtb-testConsecutiveExports,1,1733592244702.c82b210e70c278e3baa9b29e85c306d7.', STARTKEY => '1', ENDKEY => ''} 2024-12-07T17:24:05,130 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1733592244702.c82b210e70c278e3baa9b29e85c306d7. service=AccessControlService 2024-12-07T17:24:05,131 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T17:24:05,131 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports c82b210e70c278e3baa9b29e85c306d7 2024-12-07T17:24:05,131 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,1,1733592244702.c82b210e70c278e3baa9b29e85c306d7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:24:05,131 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7327): checking encryption for c82b210e70c278e3baa9b29e85c306d7 2024-12-07T17:24:05,131 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7330): checking classloading for c82b210e70c278e3baa9b29e85c306d7 2024-12-07T17:24:05,151 INFO [StoreOpener-c82b210e70c278e3baa9b29e85c306d7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c82b210e70c278e3baa9b29e85c306d7 2024-12-07T17:24:05,154 INFO [StoreOpener-c82b210e70c278e3baa9b29e85c306d7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c82b210e70c278e3baa9b29e85c306d7 columnFamilyName cf 2024-12-07T17:24:05,154 DEBUG [StoreOpener-c82b210e70c278e3baa9b29e85c306d7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:24:05,157 INFO [StoreOpener-c82b210e70c278e3baa9b29e85c306d7-1 {}] regionserver.HStore(327): Store=c82b210e70c278e3baa9b29e85c306d7/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:24:05,159 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testConsecutiveExports/c82b210e70c278e3baa9b29e85c306d7 2024-12-07T17:24:05,159 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testConsecutiveExports/c82b210e70c278e3baa9b29e85c306d7 2024-12-07T17:24:05,165 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(1085): writing seq id for c82b210e70c278e3baa9b29e85c306d7 2024-12-07T17:24:05,175 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] handler.AssignRegionHandler(135): Open testtb-testConsecutiveExports,,1733592244702.5f343c64200d857de0105da01dc231b3. 2024-12-07T17:24:05,175 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7285): Opening region: {ENCODED => 5f343c64200d857de0105da01dc231b3, NAME => 'testtb-testConsecutiveExports,,1733592244702.5f343c64200d857de0105da01dc231b3.', STARTKEY => '', ENDKEY => '1'} 2024-12-07T17:24:05,176 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testConsecutiveExports,,1733592244702.5f343c64200d857de0105da01dc231b3. service=AccessControlService 2024-12-07T17:24:05,176 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-07T17:24:05,177 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 5f343c64200d857de0105da01dc231b3 2024-12-07T17:24:05,177 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,,1733592244702.5f343c64200d857de0105da01dc231b3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:24:05,177 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7327): checking encryption for 5f343c64200d857de0105da01dc231b3 2024-12-07T17:24:05,177 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7330): checking classloading for 5f343c64200d857de0105da01dc231b3 2024-12-07T17:24:05,179 INFO [StoreOpener-5f343c64200d857de0105da01dc231b3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5f343c64200d857de0105da01dc231b3 2024-12-07T17:24:05,180 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testConsecutiveExports/c82b210e70c278e3baa9b29e85c306d7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T17:24:05,181 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(1102): Opened c82b210e70c278e3baa9b29e85c306d7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62113827, jitterRate=-0.07443185150623322}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T17:24:05,182 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(1001): Region open journal for c82b210e70c278e3baa9b29e85c306d7: 2024-12-07T17:24:05,183 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testConsecutiveExports,1,1733592244702.c82b210e70c278e3baa9b29e85c306d7., pid=84, masterSystemTime=1733592245124 2024-12-07T17:24:05,185 INFO [StoreOpener-5f343c64200d857de0105da01dc231b3-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5f343c64200d857de0105da01dc231b3 columnFamilyName cf 2024-12-07T17:24:05,185 DEBUG [StoreOpener-5f343c64200d857de0105da01dc231b3-1 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:24:05,193 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testConsecutiveExports,1,1733592244702.c82b210e70c278e3baa9b29e85c306d7. 2024-12-07T17:24:05,193 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] handler.AssignRegionHandler(164): Opened testtb-testConsecutiveExports,1,1733592244702.c82b210e70c278e3baa9b29e85c306d7. 2024-12-07T17:24:05,193 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=83 updating hbase:meta row=c82b210e70c278e3baa9b29e85c306d7, regionState=OPEN, openSeqNum=2, regionLocation=5c70517e8267,38423,1733592165361 2024-12-07T17:24:05,197 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-12-07T17:24:05,197 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; OpenRegionProcedure c82b210e70c278e3baa9b29e85c306d7, server=5c70517e8267,38423,1733592165361 in 224 msec 2024-12-07T17:24:05,199 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, ppid=81, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=c82b210e70c278e3baa9b29e85c306d7, ASSIGN in 382 msec 2024-12-07T17:24:05,213 INFO [StoreOpener-5f343c64200d857de0105da01dc231b3-1 {}] regionserver.HStore(327): Store=5f343c64200d857de0105da01dc231b3/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:24:05,215 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testConsecutiveExports/5f343c64200d857de0105da01dc231b3 2024-12-07T17:24:05,215 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testConsecutiveExports/5f343c64200d857de0105da01dc231b3 2024-12-07T17:24:05,243 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(1085): writing seq id for 5f343c64200d857de0105da01dc231b3 2024-12-07T17:24:05,262 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testConsecutiveExports/5f343c64200d857de0105da01dc231b3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T17:24:05,263 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(1102): Opened 5f343c64200d857de0105da01dc231b3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72172399, jitterRate=0.07545255124568939}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T17:24:05,263 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 
{event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(1001): Region open journal for 5f343c64200d857de0105da01dc231b3: 2024-12-07T17:24:05,265 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testConsecutiveExports,,1733592244702.5f343c64200d857de0105da01dc231b3., pid=85, masterSystemTime=1733592245125 2024-12-07T17:24:05,267 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testConsecutiveExports,,1733592244702.5f343c64200d857de0105da01dc231b3. 2024-12-07T17:24:05,267 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] handler.AssignRegionHandler(164): Opened testtb-testConsecutiveExports,,1733592244702.5f343c64200d857de0105da01dc231b3. 2024-12-07T17:24:05,268 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=82 updating hbase:meta row=5f343c64200d857de0105da01dc231b3, regionState=OPEN, openSeqNum=2, regionLocation=5c70517e8267,41313,1733592165267 2024-12-07T17:24:05,292 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=85, resume processing ppid=82 2024-12-07T17:24:05,293 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, ppid=82, state=SUCCESS; OpenRegionProcedure 5f343c64200d857de0105da01dc231b3, server=5c70517e8267,41313,1733592165267 in 298 msec 2024-12-07T17:24:05,295 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-12-07T17:24:05,295 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=5f343c64200d857de0105da01dc231b3, ASSIGN in 477 msec 2024-12-07T17:24:05,296 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T17:24:05,296 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592245296"}]},"ts":"1733592245296"} 2024-12-07T17:24:05,298 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 2024-12-07T17:24:05,301 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T17:24:05,302 DEBUG [PEWorker-2 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testConsecutiveExports jenkins: RWXCA 2024-12-07T17:24:05,304 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36163 {}] access.PermissionStorage(611): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-07T17:24:05,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:24:05,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:24:05,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:24:05,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:24:05,309 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-07T17:24:05,309 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-07T17:24:05,309 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-07T17:24:05,309 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-07T17:24:05,312 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; CreateTableProcedure table=testtb-testConsecutiveExports in 608 msec 2024-12-07T17:24:05,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-07T17:24:05,320 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports, procId: 81 completed 2024-12-07T17:24:05,320 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testConsecutiveExports get assigned. Timeout = 60000ms 2024-12-07T17:24:05,321 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:24:05,325 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testConsecutiveExports assigned to meta. Checking AM states. 2024-12-07T17:24:05,326 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:24:05,326 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testConsecutiveExports assigned. 
2024-12-07T17:24:05,331 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-07T17:24:05,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733592245332 (current time:1733592245332). 2024-12-07T17:24:05,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T17:24:05,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-07T17:24:05,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T17:24:05,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7a576671 to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@329c47b8 2024-12-07T17:24:05,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54fc6151, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:24:05,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:24:05,379 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33270, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:24:05,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7a576671 to 127.0.0.1:58133 2024-12-07T17:24:05,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:24:05,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d2c49fb to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@57e40bea 2024-12-07T17:24:05,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60a51eae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:24:05,398 DEBUG [hconnection-0xea9a58e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:24:05,400 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33276, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:24:05,402 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:24:05,404 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49822, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:24:05,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d2c49fb to 127.0.0.1:58133 2024-12-07T17:24:05,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:24:05,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] access.PermissionStorage(611): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-07T17:24:05,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-07T17:24:05,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=86, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-07T17:24:05,409 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T17:24:05,410 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T17:24:05,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 86 2024-12-07T17:24:05,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-07T17:24:05,418 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T17:24:05,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742017_1193 (size=161) 2024-12-07T17:24:05,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742017_1193 (size=161) 2024-12-07T17:24:05,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742017_1193 (size=161) 2024-12-07T17:24:05,476 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T17:24:05,476 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=87, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 5f343c64200d857de0105da01dc231b3}, {pid=88, ppid=86, state=RUNNABLE; SnapshotRegionProcedure c82b210e70c278e3baa9b29e85c306d7}] 2024-12-07T17:24:05,484 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=87, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 5f343c64200d857de0105da01dc231b3 2024-12-07T17:24:05,484 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=88, ppid=86, state=RUNNABLE; SnapshotRegionProcedure c82b210e70c278e3baa9b29e85c306d7 2024-12-07T17:24:05,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-07T17:24:05,638 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5c70517e8267,41313,1733592165267 2024-12-07T17:24:05,638 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5c70517e8267,38423,1733592165361 2024-12-07T17:24:05,639 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38423 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=88 2024-12-07T17:24:05,639 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41313 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=87 2024-12-07T17:24:05,639 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-12-07T17:24:05,640 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1733592244702.c82b210e70c278e3baa9b29e85c306d7. 2024-12-07T17:24:05,640 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for c82b210e70c278e3baa9b29e85c306d7: 2024-12-07T17:24:05,640 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1733592244702.c82b210e70c278e3baa9b29e85c306d7. for emptySnaptb0-testConsecutiveExports completed. 2024-12-07T17:24:05,640 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1733592244702.c82b210e70c278e3baa9b29e85c306d7.' 
region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-07T17:24:05,640 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:24:05,640 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T17:24:05,640 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1733592244702.5f343c64200d857de0105da01dc231b3. 2024-12-07T17:24:05,641 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.HRegion(2538): Flush status journal for 5f343c64200d857de0105da01dc231b3: 2024-12-07T17:24:05,641 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1733592244702.5f343c64200d857de0105da01dc231b3. for emptySnaptb0-testConsecutiveExports completed. 2024-12-07T17:24:05,641 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1733592244702.5f343c64200d857de0105da01dc231b3.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-07T17:24:05,641 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:24:05,641 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T17:24:05,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742018_1194 (size=68) 2024-12-07T17:24:05,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742018_1194 (size=68) 2024-12-07T17:24:05,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742018_1194 (size=68) 2024-12-07T17:24:05,665 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1733592244702.c82b210e70c278e3baa9b29e85c306d7. 
2024-12-07T17:24:05,665 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-12-07T17:24:05,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-12-07T17:24:05,666 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region c82b210e70c278e3baa9b29e85c306d7 2024-12-07T17:24:05,666 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=88, ppid=86, state=RUNNABLE; SnapshotRegionProcedure c82b210e70c278e3baa9b29e85c306d7 2024-12-07T17:24:05,672 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=86, state=SUCCESS; SnapshotRegionProcedure c82b210e70c278e3baa9b29e85c306d7 in 191 msec 2024-12-07T17:24:05,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742019_1195 (size=68) 2024-12-07T17:24:05,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742019_1195 (size=68) 2024-12-07T17:24:05,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742019_1195 (size=68) 2024-12-07T17:24:05,675 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1733592244702.5f343c64200d857de0105da01dc231b3. 2024-12-07T17:24:05,675 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=87 2024-12-07T17:24:05,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=87 2024-12-07T17:24:05,676 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 5f343c64200d857de0105da01dc231b3 2024-12-07T17:24:05,676 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=87, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 5f343c64200d857de0105da01dc231b3 2024-12-07T17:24:05,679 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=87, resume processing ppid=86 2024-12-07T17:24:05,679 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T17:24:05,679 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, ppid=86, state=SUCCESS; SnapshotRegionProcedure 5f343c64200d857de0105da01dc231b3 in 201 msec 2024-12-07T17:24:05,679 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 
execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T17:24:05,680 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T17:24:05,680 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-12-07T17:24:05,681 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-12-07T17:24:05,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742020_1196 (size=543) 2024-12-07T17:24:05,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742020_1196 (size=543) 2024-12-07T17:24:05,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742020_1196 (size=543) 2024-12-07T17:24:05,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-07T17:24:05,726 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T17:24:05,733 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T17:24:05,734 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-12-07T17:24:05,736 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T17:24:05,736 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 86 2024-12-07T17:24:05,738 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 329 msec 2024-12-07T17:24:06,017 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-07T17:24:06,017 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports, procId: 86 completed 2024-12-07T17:24:06,037 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38423 {}] regionserver.HRegion(8254): writing data to region testtb-testConsecutiveExports,1,1733592244702.c82b210e70c278e3baa9b29e85c306d7. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T17:24:06,039 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41313 {}] regionserver.HRegion(8254): writing data to region testtb-testConsecutiveExports,,1733592244702.5f343c64200d857de0105da01dc231b3. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T17:24:06,046 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testConsecutiveExports 2024-12-07T17:24:06,046 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testConsecutiveExports,,1733592244702.5f343c64200d857de0105da01dc231b3. 2024-12-07T17:24:06,047 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:24:06,062 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-07T17:24:06,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733592246062 (current time:1733592246062). 
2024-12-07T17:24:06,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T17:24:06,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-07T17:24:06,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T17:24:06,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5ef196b9 to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1db7cd3c 2024-12-07T17:24:06,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56e88475, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:24:06,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:24:06,072 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33280, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:24:06,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5ef196b9 to 127.0.0.1:58133 2024-12-07T17:24:06,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:24:06,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5f192350 to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7ce4dba2 2024-12-07T17:24:06,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e3d3367, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:24:06,090 DEBUG [hconnection-0x1a06e9be-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:24:06,091 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33286, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:24:06,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:24:06,094 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49830, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:24:06,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 
0x5f192350 to 127.0.0.1:58133 2024-12-07T17:24:06,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:24:06,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] access.PermissionStorage(611): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-07T17:24:06,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-07T17:24:06,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-07T17:24:06,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 89 2024-12-07T17:24:06,101 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T17:24:06,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-07T17:24:06,103 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T17:24:06,111 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T17:24:06,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742021_1197 (size=156) 2024-12-07T17:24:06,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742021_1197 (size=156) 2024-12-07T17:24:06,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742021_1197 (size=156) 2024-12-07T17:24:06,136 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T17:24:06,136 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 5f343c64200d857de0105da01dc231b3}, {pid=91, ppid=89, state=RUNNABLE; SnapshotRegionProcedure c82b210e70c278e3baa9b29e85c306d7}] 2024-12-07T17:24:06,144 INFO [PEWorker-5 
{}] procedure.MasterProcedureScheduler(786): Took xlock for pid=91, ppid=89, state=RUNNABLE; SnapshotRegionProcedure c82b210e70c278e3baa9b29e85c306d7 2024-12-07T17:24:06,144 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=90, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 5f343c64200d857de0105da01dc231b3 2024-12-07T17:24:06,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-07T17:24:06,295 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5c70517e8267,41313,1733592165267 2024-12-07T17:24:06,295 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5c70517e8267,38423,1733592165361 2024-12-07T17:24:06,296 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38423 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=91 2024-12-07T17:24:06,296 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41313 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=90 2024-12-07T17:24:06,296 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1733592244702.c82b210e70c278e3baa9b29e85c306d7. 2024-12-07T17:24:06,297 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1733592244702.5f343c64200d857de0105da01dc231b3. 
2024-12-07T17:24:06,297 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegion(2837): Flushing c82b210e70c278e3baa9b29e85c306d7 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-07T17:24:06,297 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegion(2837): Flushing 5f343c64200d857de0105da01dc231b3 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-07T17:24:06,326 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testConsecutiveExports/5f343c64200d857de0105da01dc231b3/.tmp/cf/a559e13bfc774f2eaff7ed9a6e9b5b57 is 71, key is 0111553a1a8f3908597846784873f0db/cf:q/1733592246039/Put/seqid=0 2024-12-07T17:24:06,329 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testConsecutiveExports/c82b210e70c278e3baa9b29e85c306d7/.tmp/cf/85478618ece54ec3b3b98619f71acc8c is 71, key is 13323375de65d392231a8d83715228c8/cf:q/1733592246036/Put/seqid=0 2024-12-07T17:24:06,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742022_1198 (size=5422) 2024-12-07T17:24:06,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742022_1198 (size=5422) 2024-12-07T17:24:06,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742022_1198 (size=5422) 2024-12-07T17:24:06,337 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testConsecutiveExports/5f343c64200d857de0105da01dc231b3/.tmp/cf/a559e13bfc774f2eaff7ed9a6e9b5b57 2024-12-07T17:24:06,346 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testConsecutiveExports/5f343c64200d857de0105da01dc231b3/.tmp/cf/a559e13bfc774f2eaff7ed9a6e9b5b57 as hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testConsecutiveExports/5f343c64200d857de0105da01dc231b3/cf/a559e13bfc774f2eaff7ed9a6e9b5b57 2024-12-07T17:24:06,359 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testConsecutiveExports/5f343c64200d857de0105da01dc231b3/cf/a559e13bfc774f2eaff7ed9a6e9b5b57, entries=5, sequenceid=6, filesize=5.3 K 2024-12-07T17:24:06,360 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~333 B/333, 
heapSize ~960 B/960, currentSize=0 B/0 for 5f343c64200d857de0105da01dc231b3 in 63ms, sequenceid=6, compaction requested=false 2024-12-07T17:24:06,360 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for 5f343c64200d857de0105da01dc231b3: 2024-12-07T17:24:06,360 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1733592244702.5f343c64200d857de0105da01dc231b3. for snaptb0-testConsecutiveExports completed. 2024-12-07T17:24:06,361 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1733592244702.5f343c64200d857de0105da01dc231b3.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-07T17:24:06,361 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:24:06,361 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testConsecutiveExports/5f343c64200d857de0105da01dc231b3/cf/a559e13bfc774f2eaff7ed9a6e9b5b57] hfiles 2024-12-07T17:24:06,361 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testConsecutiveExports/5f343c64200d857de0105da01dc231b3/cf/a559e13bfc774f2eaff7ed9a6e9b5b57 for snapshot=snaptb0-testConsecutiveExports 2024-12-07T17:24:06,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742023_1199 (size=8188) 2024-12-07T17:24:06,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742023_1199 (size=8188) 2024-12-07T17:24:06,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742023_1199 (size=8188) 2024-12-07T17:24:06,364 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testConsecutiveExports/c82b210e70c278e3baa9b29e85c306d7/.tmp/cf/85478618ece54ec3b3b98619f71acc8c 2024-12-07T17:24:06,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testConsecutiveExports/c82b210e70c278e3baa9b29e85c306d7/.tmp/cf/85478618ece54ec3b3b98619f71acc8c as hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testConsecutiveExports/c82b210e70c278e3baa9b29e85c306d7/cf/85478618ece54ec3b3b98619f71acc8c 2024-12-07T17:24:06,376 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testConsecutiveExports/c82b210e70c278e3baa9b29e85c306d7/cf/85478618ece54ec3b3b98619f71acc8c, entries=45, sequenceid=6, filesize=8.0 K 2024-12-07T17:24:06,377 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegion(3040): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for c82b210e70c278e3baa9b29e85c306d7 in 80ms, sequenceid=6, compaction requested=false 2024-12-07T17:24:06,378 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegion(2538): Flush status journal for c82b210e70c278e3baa9b29e85c306d7: 2024-12-07T17:24:06,378 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1733592244702.c82b210e70c278e3baa9b29e85c306d7. for snaptb0-testConsecutiveExports completed. 2024-12-07T17:24:06,378 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1733592244702.c82b210e70c278e3baa9b29e85c306d7.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-07T17:24:06,378 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:24:06,378 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testConsecutiveExports/c82b210e70c278e3baa9b29e85c306d7/cf/85478618ece54ec3b3b98619f71acc8c] hfiles 2024-12-07T17:24:06,378 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testConsecutiveExports/c82b210e70c278e3baa9b29e85c306d7/cf/85478618ece54ec3b3b98619f71acc8c for snapshot=snaptb0-testConsecutiveExports 2024-12-07T17:24:06,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742024_1200 (size=107) 2024-12-07T17:24:06,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742024_1200 (size=107) 2024-12-07T17:24:06,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742024_1200 (size=107) 2024-12-07T17:24:06,392 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1733592244702.5f343c64200d857de0105da01dc231b3. 
2024-12-07T17:24:06,392 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-12-07T17:24:06,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-12-07T17:24:06,393 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 5f343c64200d857de0105da01dc231b3 2024-12-07T17:24:06,393 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=90, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 5f343c64200d857de0105da01dc231b3 2024-12-07T17:24:06,395 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; SnapshotRegionProcedure 5f343c64200d857de0105da01dc231b3 in 258 msec 2024-12-07T17:24:06,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-07T17:24:06,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742025_1201 (size=107) 2024-12-07T17:24:06,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742025_1201 (size=107) 2024-12-07T17:24:06,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742025_1201 (size=107) 2024-12-07T17:24:06,410 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1733592244702.c82b210e70c278e3baa9b29e85c306d7. 
2024-12-07T17:24:06,411 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=91 2024-12-07T17:24:06,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=91 2024-12-07T17:24:06,411 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region c82b210e70c278e3baa9b29e85c306d7 2024-12-07T17:24:06,412 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=91, ppid=89, state=RUNNABLE; SnapshotRegionProcedure c82b210e70c278e3baa9b29e85c306d7 2024-12-07T17:24:06,414 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=91, resume processing ppid=89 2024-12-07T17:24:06,415 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T17:24:06,415 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, ppid=89, state=SUCCESS; SnapshotRegionProcedure c82b210e70c278e3baa9b29e85c306d7 in 277 msec 2024-12-07T17:24:06,415 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T17:24:06,416 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T17:24:06,416 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports 2024-12-07T17:24:06,417 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-07T17:24:06,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742026_1202 (size=621) 2024-12-07T17:24:06,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742026_1202 (size=621) 2024-12-07T17:24:06,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742026_1202 (size=621) 2024-12-07T17:24:06,450 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T17:24:06,457 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=89, 
state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T17:24:06,457 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-07T17:24:06,459 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T17:24:06,459 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 89 2024-12-07T17:24:06,460 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 363 msec 2024-12-07T17:24:06,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-07T17:24:06,707 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports, procId: 89 completed 2024-12-07T17:24:06,707 INFO [Time-limited test {}] snapshot.TestExportSnapshot(476): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/local-export-1733592246707 2024-12-07T17:24:06,707 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/local-export-1733592246707, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/local-export-1733592246707, srcFsUri=hdfs://localhost:33765, srcDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:24:06,754 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:33765, inputRoot=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:24:06,754 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=org.apache.hadoop.fs.LocalFileSystem@74070ae7, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/local-export-1733592246707, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/local-export-1733592246707/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-07T17:24:06,757 INFO [Time-limited 
test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-07T17:24:06,767 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/local-export-1733592246707/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-07T17:24:06,813 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:06,813 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:06,814 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:06,814 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:07,962 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop-18135742888052838368.jar 2024-12-07T17:24:07,962 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:07,962 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:08,032 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop-10774020904541940281.jar 2024-12-07T17:24:08,032 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:08,033 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:08,033 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:08,033 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:08,033 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:08,034 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:08,034 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-07T17:24:08,034 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-07T17:24:08,034 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-07T17:24:08,034 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-07T17:24:08,035 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-07T17:24:08,035 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-07T17:24:08,035 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-07T17:24:08,035 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-07T17:24:08,036 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-07T17:24:08,036 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-07T17:24:08,036 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-07T17:24:08,036 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-07T17:24:08,037 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:24:08,037 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:24:08,037 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T17:24:08,037 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:24:08,038 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:24:08,038 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T17:24:08,038 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T17:24:08,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742027_1203 (size=451756) 2024-12-07T17:24:08,131 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742027_1203 (size=451756) 2024-12-07T17:24:08,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742027_1203 (size=451756) 2024-12-07T17:24:08,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742028_1204 (size=127628) 2024-12-07T17:24:08,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742028_1204 (size=127628) 2024-12-07T17:24:08,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742028_1204 (size=127628) 2024-12-07T17:24:08,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742029_1205 (size=2172101) 2024-12-07T17:24:08,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742029_1205 (size=2172101) 2024-12-07T17:24:08,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742029_1205 (size=2172101) 2024-12-07T17:24:08,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742030_1206 (size=213228) 2024-12-07T17:24:08,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742030_1206 (size=213228) 2024-12-07T17:24:08,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742030_1206 (size=213228) 2024-12-07T17:24:08,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742031_1207 (size=1877034) 2024-12-07T17:24:08,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742031_1207 (size=1877034) 2024-12-07T17:24:08,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742031_1207 (size=1877034) 2024-12-07T17:24:08,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742032_1208 (size=533455) 2024-12-07T17:24:08,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742032_1208 (size=533455) 2024-12-07T17:24:08,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742032_1208 (size=533455) 2024-12-07T17:24:08,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742033_1209 (size=7280644) 2024-12-07T17:24:08,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742033_1209 (size=7280644) 2024-12-07T17:24:08,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742033_1209 (size=7280644) 2024-12-07T17:24:08,241 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742034_1210 (size=4188619) 2024-12-07T17:24:08,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742034_1210 (size=4188619) 2024-12-07T17:24:08,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742034_1210 (size=4188619) 2024-12-07T17:24:08,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742035_1211 (size=20406) 2024-12-07T17:24:08,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742035_1211 (size=20406) 2024-12-07T17:24:08,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742035_1211 (size=20406) 2024-12-07T17:24:08,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742036_1212 (size=75495) 2024-12-07T17:24:08,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742036_1212 (size=75495) 2024-12-07T17:24:08,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742036_1212 (size=75495) 2024-12-07T17:24:08,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742037_1213 (size=45609) 2024-12-07T17:24:08,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742037_1213 (size=45609) 2024-12-07T17:24:08,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742037_1213 (size=45609) 2024-12-07T17:24:08,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742038_1214 (size=110084) 2024-12-07T17:24:08,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742038_1214 (size=110084) 2024-12-07T17:24:08,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742038_1214 (size=110084) 2024-12-07T17:24:08,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742039_1215 (size=1323991) 2024-12-07T17:24:08,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742039_1215 (size=1323991) 2024-12-07T17:24:08,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742039_1215 (size=1323991) 2024-12-07T17:24:08,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742040_1216 (size=23076) 2024-12-07T17:24:08,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742040_1216 (size=23076) 2024-12-07T17:24:08,302 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742040_1216 (size=23076) 2024-12-07T17:24:08,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742041_1217 (size=126803) 2024-12-07T17:24:08,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742041_1217 (size=126803) 2024-12-07T17:24:08,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742041_1217 (size=126803) 2024-12-07T17:24:08,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742042_1218 (size=322274) 2024-12-07T17:24:08,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742042_1218 (size=322274) 2024-12-07T17:24:08,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742042_1218 (size=322274) 2024-12-07T17:24:08,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742043_1219 (size=1832290) 2024-12-07T17:24:08,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742043_1219 (size=1832290) 2024-12-07T17:24:08,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742043_1219 (size=1832290) 2024-12-07T17:24:08,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742044_1220 (size=30081) 2024-12-07T17:24:08,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742044_1220 (size=30081) 2024-12-07T17:24:08,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742044_1220 (size=30081) 2024-12-07T17:24:08,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742045_1221 (size=53616) 2024-12-07T17:24:08,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742045_1221 (size=53616) 2024-12-07T17:24:08,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742045_1221 (size=53616) 2024-12-07T17:24:08,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742046_1222 (size=29229) 2024-12-07T17:24:08,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742046_1222 (size=29229) 2024-12-07T17:24:08,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742046_1222 (size=29229) 2024-12-07T17:24:08,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742047_1223 (size=169089) 2024-12-07T17:24:08,379 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742047_1223 (size=169089) 2024-12-07T17:24:08,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742047_1223 (size=169089) 2024-12-07T17:24:08,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742048_1224 (size=5175431) 2024-12-07T17:24:08,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742048_1224 (size=5175431) 2024-12-07T17:24:08,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742048_1224 (size=5175431) 2024-12-07T17:24:08,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742049_1225 (size=136454) 2024-12-07T17:24:08,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742049_1225 (size=136454) 2024-12-07T17:24:08,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742049_1225 (size=136454) 2024-12-07T17:24:08,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742050_1226 (size=6350154) 2024-12-07T17:24:08,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742050_1226 (size=6350154) 2024-12-07T17:24:08,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742050_1226 (size=6350154) 2024-12-07T17:24:08,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742051_1227 (size=907853) 2024-12-07T17:24:08,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742051_1227 (size=907853) 2024-12-07T17:24:08,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742051_1227 (size=907853) 2024-12-07T17:24:08,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742052_1228 (size=3317408) 2024-12-07T17:24:08,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742052_1228 (size=3317408) 2024-12-07T17:24:08,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742052_1228 (size=3317408) 2024-12-07T17:24:08,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742053_1229 (size=503880) 2024-12-07T17:24:08,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742053_1229 (size=503880) 2024-12-07T17:24:08,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742053_1229 (size=503880) 
2024-12-07T17:24:08,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742054_1230 (size=4695811)
2024-12-07T17:24:08,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742054_1230 (size=4695811)
2024-12-07T17:24:08,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742054_1230 (size=4695811)
2024-12-07T17:24:08,515 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String).
2024-12-07T17:24:08,517 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list
2024-12-07T17:24:08,520 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K
2024-12-07T17:24:08,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742055_1231 (size=338)
2024-12-07T17:24:08,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742055_1231 (size=338)
2024-12-07T17:24:08,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742055_1231 (size=338)
2024-12-07T17:24:08,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742056_1232 (size=15)
2024-12-07T17:24:08,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742056_1232 (size=15)
2024-12-07T17:24:08,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742056_1232 (size=15)
2024-12-07T17:24:08,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742057_1233 (size=304934)
2024-12-07T17:24:08,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742057_1233 (size=304934)
2024-12-07T17:24:08,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742057_1233 (size=304934)
2024-12-07T17:24:09,062 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start
2024-12-07T17:24:09,062 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low.
skipping enforcement to allow at least one application to start 2024-12-07T17:24:09,065 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0003_000001 (auth:SIMPLE) from 127.0.0.1:35706 2024-12-07T17:24:09,078 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_3/usercache/jenkins/appcache/application_1733592173099_0003/container_1733592173099_0003_01_000001/launch_container.sh] 2024-12-07T17:24:09,078 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_3/usercache/jenkins/appcache/application_1733592173099_0003/container_1733592173099_0003_01_000001/container_tokens] 2024-12-07T17:24:09,078 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_3/usercache/jenkins/appcache/application_1733592173099_0003/container_1733592173099_0003_01_000001/sysfs] 2024-12-07T17:24:09,715 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0004_000001 (auth:SIMPLE) from 127.0.0.1:60696 2024-12-07T17:24:09,923 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T17:24:13,249 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-07T17:24:14,860 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-07T17:24:14,860 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer 2024-12-07T17:24:15,825 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0004_000001 (auth:SIMPLE) from 127.0.0.1:42556 2024-12-07T17:24:16,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742058_1234 (size=350608) 2024-12-07T17:24:16,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742058_1234 (size=350608) 2024-12-07T17:24:16,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742058_1234 (size=350608) 2024-12-07T17:24:18,173 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0004_000001 (auth:SIMPLE) from 127.0.0.1:38140 2024-12-07T17:24:23,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742059_1235 (size=17447) 2024-12-07T17:24:23,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742059_1235 (size=17447) 2024-12-07T17:24:23,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742059_1235 (size=17447) 2024-12-07T17:24:23,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742060_1236 (size=462) 2024-12-07T17:24:23,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742060_1236 (size=462) 2024-12-07T17:24:23,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742060_1236 (size=462) 2024-12-07T17:24:23,727 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_3/usercache/jenkins/appcache/application_1733592173099_0004/container_1733592173099_0004_01_000002/launch_container.sh] 2024-12-07T17:24:23,727 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_3/usercache/jenkins/appcache/application_1733592173099_0004/container_1733592173099_0004_01_000002/container_tokens] 2024-12-07T17:24:23,727 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned 
false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_3/usercache/jenkins/appcache/application_1733592173099_0004/container_1733592173099_0004_01_000002/sysfs] 2024-12-07T17:24:23,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742061_1237 (size=17447) 2024-12-07T17:24:23,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742061_1237 (size=17447) 2024-12-07T17:24:23,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742061_1237 (size=17447) 2024-12-07T17:24:23,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742062_1238 (size=350608) 2024-12-07T17:24:23,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742062_1238 (size=350608) 2024-12-07T17:24:23,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742062_1238 (size=350608) 2024-12-07T17:24:23,772 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0004_000001 (auth:SIMPLE) from 127.0.0.1:32852 2024-12-07T17:24:25,797 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-07T17:24:25,797 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
2024-12-07T17:24:25,804 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testConsecutiveExports
2024-12-07T17:24:25,805 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot
2024-12-07T17:24:25,805 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state
2024-12-07T17:24:25,805 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1614647307_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testConsecutiveExports
2024-12-07T17:24:25,825 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo
2024-12-07T17:24:25,825 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest
2024-12-07T17:24:25,825 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in org.apache.hadoop.fs.LocalFileSystem@74070ae7 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/local-export-1733592246707/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/local-export-1733592246707/.hbase-snapshot/snaptb0-testConsecutiveExports
2024-12-07T17:24:25,825 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/local-export-1733592246707/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest
2024-12-07T17:24:25,825 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/local-export-1733592246707/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo
2024-12-07T17:24:25,827 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/local-export-1733592246707, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/local-export-1733592246707, srcFsUri=hdfs://localhost:33765, srcDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc
2024-12-07T17:24:25,859 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:33765, inputRoot=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc
2024-12-07T17:24:25,859 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=org.apache.hadoop.fs.LocalFileSystem@74070ae7,
outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/local-export-1733592246707, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/local-export-1733592246707/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-07T17:24:25,861 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-07T17:24:25,866 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/local-export-1733592246707/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-07T17:24:25,886 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:25,887 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:25,887 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:25,887 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:27,057 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop-11563183783033396599.jar 2024-12-07T17:24:27,058 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:27,058 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:27,144 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop-13695150164546781543.jar 2024-12-07T17:24:27,145 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:27,145 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:27,145 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:27,146 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:27,146 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:27,146 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:27,146 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-07T17:24:27,146 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-07T17:24:27,147 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-07T17:24:27,147 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-07T17:24:27,147 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-07T17:24:27,147 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-07T17:24:27,148 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-07T17:24:27,148 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-07T17:24:27,148 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-07T17:24:27,149 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-07T17:24:27,149 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-07T17:24:27,149 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-07T17:24:27,150 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:24:27,150 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:24:27,150 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T17:24:27,151 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:24:27,151 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:24:27,152 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T17:24:27,152 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T17:24:27,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742063_1239 (size=127628) 2024-12-07T17:24:27,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742063_1239 (size=127628) 2024-12-07T17:24:27,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742063_1239 (size=127628) 2024-12-07T17:24:27,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742064_1240 (size=2172101) 2024-12-07T17:24:27,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742064_1240 (size=2172101) 2024-12-07T17:24:27,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742064_1240 (size=2172101) 2024-12-07T17:24:27,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742065_1241 (size=213228) 2024-12-07T17:24:27,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742065_1241 (size=213228) 2024-12-07T17:24:27,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742065_1241 (size=213228) 2024-12-07T17:24:27,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742066_1242 (size=6350154) 2024-12-07T17:24:27,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742066_1242 (size=6350154) 2024-12-07T17:24:27,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742066_1242 (size=6350154) 2024-12-07T17:24:27,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742067_1243 (size=1877034) 2024-12-07T17:24:27,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742067_1243 (size=1877034) 2024-12-07T17:24:27,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742067_1243 (size=1877034) 2024-12-07T17:24:27,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742068_1244 (size=533455) 2024-12-07T17:24:27,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742068_1244 (size=533455) 2024-12-07T17:24:27,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742068_1244 (size=533455) 2024-12-07T17:24:27,501 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742069_1245 (size=451756) 2024-12-07T17:24:27,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742069_1245 (size=451756) 2024-12-07T17:24:27,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742069_1245 (size=451756) 2024-12-07T17:24:27,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742070_1246 (size=7280644) 2024-12-07T17:24:27,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742070_1246 (size=7280644) 2024-12-07T17:24:27,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742070_1246 (size=7280644) 2024-12-07T17:24:27,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742071_1247 (size=4188619) 2024-12-07T17:24:27,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742071_1247 (size=4188619) 2024-12-07T17:24:27,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742071_1247 (size=4188619) 2024-12-07T17:24:27,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742072_1248 (size=20406) 2024-12-07T17:24:27,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742072_1248 (size=20406) 2024-12-07T17:24:27,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742072_1248 (size=20406) 2024-12-07T17:24:27,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742073_1249 (size=75495) 2024-12-07T17:24:27,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742073_1249 (size=75495) 2024-12-07T17:24:27,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742073_1249 (size=75495) 2024-12-07T17:24:27,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742074_1250 (size=45609) 2024-12-07T17:24:27,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742074_1250 (size=45609) 2024-12-07T17:24:27,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742074_1250 (size=45609) 2024-12-07T17:24:27,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742075_1251 (size=110084) 2024-12-07T17:24:27,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742075_1251 (size=110084) 2024-12-07T17:24:27,631 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742075_1251 (size=110084) 2024-12-07T17:24:27,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742076_1252 (size=1323991) 2024-12-07T17:24:27,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742076_1252 (size=1323991) 2024-12-07T17:24:27,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742076_1252 (size=1323991) 2024-12-07T17:24:27,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742077_1253 (size=23076) 2024-12-07T17:24:27,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742077_1253 (size=23076) 2024-12-07T17:24:27,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742077_1253 (size=23076) 2024-12-07T17:24:27,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742078_1254 (size=126803) 2024-12-07T17:24:27,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742078_1254 (size=126803) 2024-12-07T17:24:27,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742078_1254 (size=126803) 2024-12-07T17:24:27,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742079_1255 (size=322274) 2024-12-07T17:24:27,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742079_1255 (size=322274) 2024-12-07T17:24:27,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742079_1255 (size=322274) 2024-12-07T17:24:27,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742080_1256 (size=1832290) 2024-12-07T17:24:27,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742080_1256 (size=1832290) 2024-12-07T17:24:27,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742080_1256 (size=1832290) 2024-12-07T17:24:27,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742081_1257 (size=30081) 2024-12-07T17:24:27,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742081_1257 (size=30081) 2024-12-07T17:24:27,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742081_1257 (size=30081) 2024-12-07T17:24:27,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742082_1258 (size=53616) 2024-12-07T17:24:27,761 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742082_1258 (size=53616) 2024-12-07T17:24:27,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742082_1258 (size=53616) 2024-12-07T17:24:27,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742083_1259 (size=29229) 2024-12-07T17:24:27,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742083_1259 (size=29229) 2024-12-07T17:24:27,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742083_1259 (size=29229) 2024-12-07T17:24:27,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742084_1260 (size=169089) 2024-12-07T17:24:27,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742084_1260 (size=169089) 2024-12-07T17:24:27,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742084_1260 (size=169089) 2024-12-07T17:24:27,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742085_1261 (size=5175431) 2024-12-07T17:24:27,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742085_1261 (size=5175431) 2024-12-07T17:24:27,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742085_1261 (size=5175431) 2024-12-07T17:24:27,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742086_1262 (size=136454) 2024-12-07T17:24:27,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742086_1262 (size=136454) 2024-12-07T17:24:27,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742086_1262 (size=136454) 2024-12-07T17:24:27,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742087_1263 (size=907853) 2024-12-07T17:24:27,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742087_1263 (size=907853) 2024-12-07T17:24:27,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742087_1263 (size=907853) 2024-12-07T17:24:27,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742088_1264 (size=3317408) 2024-12-07T17:24:27,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742088_1264 (size=3317408) 2024-12-07T17:24:27,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742088_1264 (size=3317408) 2024-12-07T17:24:27,960 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742089_1265 (size=503880)
2024-12-07T17:24:27,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742089_1265 (size=503880)
2024-12-07T17:24:27,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742089_1265 (size=503880)
2024-12-07T17:24:28,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742090_1266 (size=4695811)
2024-12-07T17:24:28,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742090_1266 (size=4695811)
2024-12-07T17:24:28,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742090_1266 (size=4695811)
2024-12-07T17:24:28,019 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String).
2024-12-07T17:24:28,022 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list
2024-12-07T17:24:28,025 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K
2024-12-07T17:24:28,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742091_1267 (size=338)
2024-12-07T17:24:28,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742091_1267 (size=338)
2024-12-07T17:24:28,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742091_1267 (size=338)
2024-12-07T17:24:28,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742092_1268 (size=15)
2024-12-07T17:24:28,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742092_1268 (size=15)
2024-12-07T17:24:28,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742092_1268 (size=15)
2024-12-07T17:24:28,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742093_1269 (size=304934)
2024-12-07T17:24:28,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742093_1269 (size=304934)
2024-12-07T17:24:28,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742093_1269 (size=304934)
2024-12-07T17:24:29,846 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low.
skipping enforcement to allow at least one application to start 2024-12-07T17:24:29,846 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-07T17:24:29,850 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0004_000001 (auth:SIMPLE) from 127.0.0.1:58164 2024-12-07T17:24:29,861 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-1_0/usercache/jenkins/appcache/application_1733592173099_0004/container_1733592173099_0004_01_000001/launch_container.sh] 2024-12-07T17:24:29,861 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-1_0/usercache/jenkins/appcache/application_1733592173099_0004/container_1733592173099_0004_01_000001/container_tokens] 2024-12-07T17:24:29,861 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-1_0/usercache/jenkins/appcache/application_1733592173099_0004/container_1733592173099_0004_01_000001/sysfs] 2024-12-07T17:24:30,716 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0005_000001 (auth:SIMPLE) from 127.0.0.1:32862 2024-12-07T17:24:36,745 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0005_000001 (auth:SIMPLE) from 127.0.0.1:36176 2024-12-07T17:24:37,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742094_1270 (size=350608) 2024-12-07T17:24:37,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742094_1270 (size=350608) 2024-12-07T17:24:37,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742094_1270 (size=350608) 2024-12-07T17:24:39,094 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0005_000001 (auth:SIMPLE) from 127.0.0.1:54030 2024-12-07T17:24:43,250 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-07T17:24:44,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742095_1271 (size=16925) 2024-12-07T17:24:44,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742095_1271 (size=16925) 2024-12-07T17:24:44,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742095_1271 (size=16925) 2024-12-07T17:24:44,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742096_1272 (size=462) 2024-12-07T17:24:44,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742096_1272 (size=462) 2024-12-07T17:24:44,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742096_1272 (size=462) 2024-12-07T17:24:44,784 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-1_2/usercache/jenkins/appcache/application_1733592173099_0005/container_1733592173099_0005_01_000002/launch_container.sh] 2024-12-07T17:24:44,784 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-1_2/usercache/jenkins/appcache/application_1733592173099_0005/container_1733592173099_0005_01_000002/container_tokens] 2024-12-07T17:24:44,784 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-1_2/usercache/jenkins/appcache/application_1733592173099_0005/container_1733592173099_0005_01_000002/sysfs] 2024-12-07T17:24:44,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742097_1273 (size=16925) 2024-12-07T17:24:44,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742097_1273 (size=16925) 2024-12-07T17:24:44,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742097_1273 (size=16925) 2024-12-07T17:24:44,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742098_1274 (size=350608) 2024-12-07T17:24:44,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742098_1274 (size=350608) 2024-12-07T17:24:44,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742098_1274 (size=350608) 2024-12-07T17:24:44,836 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0005_000001 (auth:SIMPLE) from 127.0.0.1:43552 2024-12-07T17:24:46,305 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-07T17:24:46,306 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-07T17:24:46,313 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testConsecutiveExports 2024-12-07T17:24:46,313 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-07T17:24:46,314 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-07T17:24:46,314 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1614647307_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-07T17:24:46,315 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-07T17:24:46,315 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-07T17:24:46,315 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in org.apache.hadoop.fs.LocalFileSystem@74070ae7 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/local-export-1733592246707/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/local-export-1733592246707/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-07T17:24:46,316 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/local-export-1733592246707/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-07T17:24:46,316 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/local-export-1733592246707/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-07T17:24:46,344 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testConsecutiveExports 2024-12-07T17:24:46,345 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testConsecutiveExports 2024-12-07T17:24:46,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=92, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure 
table=testtb-testConsecutiveExports 2024-12-07T17:24:46,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-07T17:24:46,349 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592286349"}]},"ts":"1733592286349"} 2024-12-07T17:24:46,351 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=DISABLING in hbase:meta 2024-12-07T17:24:46,353 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-12-07T17:24:46,353 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-12-07T17:24:46,355 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=5f343c64200d857de0105da01dc231b3, UNASSIGN}, {pid=95, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=c82b210e70c278e3baa9b29e85c306d7, UNASSIGN}] 2024-12-07T17:24:46,356 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=95, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=c82b210e70c278e3baa9b29e85c306d7, UNASSIGN 2024-12-07T17:24:46,356 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=94, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=5f343c64200d857de0105da01dc231b3, UNASSIGN 2024-12-07T17:24:46,357 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=c82b210e70c278e3baa9b29e85c306d7, regionState=CLOSING, regionLocation=5c70517e8267,38423,1733592165361 2024-12-07T17:24:46,357 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=94 updating hbase:meta row=5f343c64200d857de0105da01dc231b3, regionState=CLOSING, regionLocation=5c70517e8267,41313,1733592165267 2024-12-07T17:24:46,358 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T17:24:46,359 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=94, state=RUNNABLE; CloseRegionProcedure 5f343c64200d857de0105da01dc231b3, server=5c70517e8267,41313,1733592165267}] 2024-12-07T17:24:46,361 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T17:24:46,361 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=95, state=RUNNABLE; CloseRegionProcedure c82b210e70c278e3baa9b29e85c306d7, server=5c70517e8267,38423,1733592165361}] 2024-12-07T17:24:46,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-07T17:24:46,512 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5c70517e8267,41313,1733592165267 
2024-12-07T17:24:46,512 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(124): Close 5f343c64200d857de0105da01dc231b3 2024-12-07T17:24:46,513 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T17:24:46,513 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5c70517e8267,38423,1733592165361 2024-12-07T17:24:46,513 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1681): Closing 5f343c64200d857de0105da01dc231b3, disabling compactions & flushes 2024-12-07T17:24:46,513 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,,1733592244702.5f343c64200d857de0105da01dc231b3. 2024-12-07T17:24:46,513 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,,1733592244702.5f343c64200d857de0105da01dc231b3. 2024-12-07T17:24:46,513 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,,1733592244702.5f343c64200d857de0105da01dc231b3. after waiting 0 ms 2024-12-07T17:24:46,513 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,,1733592244702.5f343c64200d857de0105da01dc231b3. 2024-12-07T17:24:46,513 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] handler.UnassignRegionHandler(124): Close c82b210e70c278e3baa9b29e85c306d7 2024-12-07T17:24:46,513 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T17:24:46,514 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1681): Closing c82b210e70c278e3baa9b29e85c306d7, disabling compactions & flushes 2024-12-07T17:24:46,514 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,1,1733592244702.c82b210e70c278e3baa9b29e85c306d7. 2024-12-07T17:24:46,514 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,1,1733592244702.c82b210e70c278e3baa9b29e85c306d7. 2024-12-07T17:24:46,514 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,1,1733592244702.c82b210e70c278e3baa9b29e85c306d7. after waiting 0 ms 2024-12-07T17:24:46,514 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,1,1733592244702.c82b210e70c278e3baa9b29e85c306d7. 
2024-12-07T17:24:46,520 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testConsecutiveExports/5f343c64200d857de0105da01dc231b3/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T17:24:46,521 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:24:46,521 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,,1733592244702.5f343c64200d857de0105da01dc231b3. 2024-12-07T17:24:46,521 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1635): Region close journal for 5f343c64200d857de0105da01dc231b3: 2024-12-07T17:24:46,522 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testConsecutiveExports/c82b210e70c278e3baa9b29e85c306d7/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T17:24:46,523 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(170): Closed 5f343c64200d857de0105da01dc231b3 2024-12-07T17:24:46,523 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:24:46,523 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,1,1733592244702.c82b210e70c278e3baa9b29e85c306d7. 
2024-12-07T17:24:46,523 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1635): Region close journal for c82b210e70c278e3baa9b29e85c306d7: 2024-12-07T17:24:46,528 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=94 updating hbase:meta row=5f343c64200d857de0105da01dc231b3, regionState=CLOSED 2024-12-07T17:24:46,529 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] handler.UnassignRegionHandler(170): Closed c82b210e70c278e3baa9b29e85c306d7 2024-12-07T17:24:46,530 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=c82b210e70c278e3baa9b29e85c306d7, regionState=CLOSED 2024-12-07T17:24:46,532 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=94 2024-12-07T17:24:46,533 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=94, state=SUCCESS; CloseRegionProcedure 5f343c64200d857de0105da01dc231b3, server=5c70517e8267,41313,1733592165267 in 172 msec 2024-12-07T17:24:46,534 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=95 2024-12-07T17:24:46,534 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=5f343c64200d857de0105da01dc231b3, UNASSIGN in 178 msec 2024-12-07T17:24:46,534 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=95, state=SUCCESS; CloseRegionProcedure c82b210e70c278e3baa9b29e85c306d7, server=5c70517e8267,38423,1733592165361 in 171 msec 2024-12-07T17:24:46,537 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=95, resume processing ppid=93 2024-12-07T17:24:46,537 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=93, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=c82b210e70c278e3baa9b29e85c306d7, UNASSIGN in 179 msec 2024-12-07T17:24:46,539 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=93, resume processing ppid=92 2024-12-07T17:24:46,539 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, ppid=92, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 184 msec 2024-12-07T17:24:46,540 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592286540"}]},"ts":"1733592286540"} 2024-12-07T17:24:46,541 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-12-07T17:24:46,543 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 2024-12-07T17:24:46,546 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, state=SUCCESS; DisableTableProcedure table=testtb-testConsecutiveExports in 199 msec 2024-12-07T17:24:46,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-07T17:24:46,651 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports, procId: 92 completed 2024-12-07T17:24:46,653 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testConsecutiveExports 2024-12-07T17:24:46,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=98, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-07T17:24:46,661 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=98, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-07T17:24:46,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testConsecutiveExports 2024-12-07T17:24:46,662 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=98, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-07T17:24:46,664 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36163 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports 2024-12-07T17:24:46,668 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testConsecutiveExports/c82b210e70c278e3baa9b29e85c306d7 2024-12-07T17:24:46,668 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testConsecutiveExports/5f343c64200d857de0105da01dc231b3 2024-12-07T17:24:46,669 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-07T17:24:46,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-07T17:24:46,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-07T17:24:46,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-07T17:24:46,671 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-07T17:24:46,671 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-07T17:24:46,672 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-07T17:24:46,672 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testConsecutiveExports with data PBUF 2024-12-07T17:24:46,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-07T17:24:46,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-07T17:24:46,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:24:46,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:24:46,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-07T17:24:46,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:24:46,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-07T17:24:46,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:24:46,679 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testConsecutiveExports/c82b210e70c278e3baa9b29e85c306d7/cf, FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testConsecutiveExports/c82b210e70c278e3baa9b29e85c306d7/recovered.edits] 2024-12-07T17:24:46,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-07T17:24:46,694 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testConsecutiveExports/5f343c64200d857de0105da01dc231b3/cf, FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testConsecutiveExports/5f343c64200d857de0105da01dc231b3/recovered.edits] 2024-12-07T17:24:46,696 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testConsecutiveExports/c82b210e70c278e3baa9b29e85c306d7/cf/85478618ece54ec3b3b98619f71acc8c to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testConsecutiveExports/c82b210e70c278e3baa9b29e85c306d7/cf/85478618ece54ec3b3b98619f71acc8c 2024-12-07T17:24:46,701 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testConsecutiveExports/5f343c64200d857de0105da01dc231b3/cf/a559e13bfc774f2eaff7ed9a6e9b5b57 to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testConsecutiveExports/5f343c64200d857de0105da01dc231b3/cf/a559e13bfc774f2eaff7ed9a6e9b5b57 2024-12-07T17:24:46,701 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testConsecutiveExports/c82b210e70c278e3baa9b29e85c306d7/recovered.edits/9.seqid to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testConsecutiveExports/c82b210e70c278e3baa9b29e85c306d7/recovered.edits/9.seqid 2024-12-07T17:24:46,702 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testConsecutiveExports/c82b210e70c278e3baa9b29e85c306d7 2024-12-07T17:24:46,706 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testConsecutiveExports/5f343c64200d857de0105da01dc231b3/recovered.edits/9.seqid to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testConsecutiveExports/5f343c64200d857de0105da01dc231b3/recovered.edits/9.seqid 2024-12-07T17:24:46,707 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testConsecutiveExports/5f343c64200d857de0105da01dc231b3 2024-12-07T17:24:46,707 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions 2024-12-07T17:24:46,709 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=98, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-07T17:24:46,712 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 2024-12-07T17:24:46,715 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testConsecutiveExports' descriptor. 2024-12-07T17:24:46,722 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=98, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-07T17:24:46,722 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testConsecutiveExports' from region states. 
2024-12-07T17:24:46,722 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1733592244702.5f343c64200d857de0105da01dc231b3.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733592286722"}]},"ts":"9223372036854775807"} 2024-12-07T17:24:46,722 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1733592244702.c82b210e70c278e3baa9b29e85c306d7.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733592286722"}]},"ts":"9223372036854775807"} 2024-12-07T17:24:46,726 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-07T17:24:46,726 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 5f343c64200d857de0105da01dc231b3, NAME => 'testtb-testConsecutiveExports,,1733592244702.5f343c64200d857de0105da01dc231b3.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => c82b210e70c278e3baa9b29e85c306d7, NAME => 'testtb-testConsecutiveExports,1,1733592244702.c82b210e70c278e3baa9b29e85c306d7.', STARTKEY => '1', ENDKEY => ''}] 2024-12-07T17:24:46,726 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testConsecutiveExports' as deleted. 2024-12-07T17:24:46,726 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733592286726"}]},"ts":"9223372036854775807"} 2024-12-07T17:24:46,734 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testConsecutiveExports state from META 2024-12-07T17:24:46,737 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=98, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-07T17:24:46,738 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, state=SUCCESS; DeleteTableProcedure table=testtb-testConsecutiveExports in 84 msec 2024-12-07T17:24:46,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-07T17:24:46,782 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports, procId: 98 completed 2024-12-07T17:24:46,791 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testConsecutiveExports" 2024-12-07T17:24:46,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-12-07T17:24:46,795 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testConsecutiveExports" 2024-12-07T17:24:46,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testConsecutiveExports 2024-12-07T17:24:46,825 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=780 (was 781), OpenFileDescriptor=789 (was 787) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=577 (was 599), ProcessCount=20 (was 20), AvailableMemoryMB=4531 (was 4969) 2024-12-07T17:24:46,825 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=780 is superior to 500 2024-12-07T17:24:46,849 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=780, OpenFileDescriptor=789, MaxFileDescriptor=1048576, SystemLoadAverage=577, ProcessCount=20, AvailableMemoryMB=4531 2024-12-07T17:24:46,849 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=780 is superior to 500 2024-12-07T17:24:46,851 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T17:24:46,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=99, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T17:24:46,855 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T17:24:46,855 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:24:46,855 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 99 2024-12-07T17:24:46,856 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T17:24:46,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-07T17:24:46,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742099_1275 (size=422) 2024-12-07T17:24:46,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742099_1275 (size=422) 2024-12-07T17:24:46,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742099_1275 (size=422) 2024-12-07T17:24:46,885 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 4c8dcef6eef6c6ffe6eec4eeefe6c98b, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733592286851.4c8dcef6eef6c6ffe6eec4eeefe6c98b.', STARTKEY => '1', ENDKEY => ''}, 
tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:24:46,885 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 7e007c78d4bbd37599fc2cd27d69bfde, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733592286851.7e007c78d4bbd37599fc2cd27d69bfde.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:24:46,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742100_1276 (size=83) 2024-12-07T17:24:46,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742100_1276 (size=83) 2024-12-07T17:24:46,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742100_1276 (size=83) 2024-12-07T17:24:46,910 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1733592286851.7e007c78d4bbd37599fc2cd27d69bfde.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:24:46,910 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1681): Closing 7e007c78d4bbd37599fc2cd27d69bfde, disabling compactions & flushes 2024-12-07T17:24:46,910 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1733592286851.7e007c78d4bbd37599fc2cd27d69bfde. 2024-12-07T17:24:46,911 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733592286851.7e007c78d4bbd37599fc2cd27d69bfde. 2024-12-07T17:24:46,911 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733592286851.7e007c78d4bbd37599fc2cd27d69bfde. 
after waiting 0 ms 2024-12-07T17:24:46,911 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1733592286851.7e007c78d4bbd37599fc2cd27d69bfde. 2024-12-07T17:24:46,911 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,,1733592286851.7e007c78d4bbd37599fc2cd27d69bfde. 2024-12-07T17:24:46,911 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1635): Region close journal for 7e007c78d4bbd37599fc2cd27d69bfde: 2024-12-07T17:24:46,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742101_1277 (size=83) 2024-12-07T17:24:46,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742101_1277 (size=83) 2024-12-07T17:24:46,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742101_1277 (size=83) 2024-12-07T17:24:46,915 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1733592286851.4c8dcef6eef6c6ffe6eec4eeefe6c98b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:24:46,915 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1681): Closing 4c8dcef6eef6c6ffe6eec4eeefe6c98b, disabling compactions & flushes 2024-12-07T17:24:46,915 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1733592286851.4c8dcef6eef6c6ffe6eec4eeefe6c98b. 2024-12-07T17:24:46,916 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733592286851.4c8dcef6eef6c6ffe6eec4eeefe6c98b. 2024-12-07T17:24:46,916 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733592286851.4c8dcef6eef6c6ffe6eec4eeefe6c98b. after waiting 0 ms 2024-12-07T17:24:46,916 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1733592286851.4c8dcef6eef6c6ffe6eec4eeefe6c98b. 2024-12-07T17:24:46,916 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1733592286851.4c8dcef6eef6c6ffe6eec4eeefe6c98b. 
2024-12-07T17:24:46,916 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1635): Region close journal for 4c8dcef6eef6c6ffe6eec4eeefe6c98b: 2024-12-07T17:24:46,917 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T17:24:46,917 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1733592286851.7e007c78d4bbd37599fc2cd27d69bfde.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733592286917"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733592286917"}]},"ts":"1733592286917"} 2024-12-07T17:24:46,917 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1733592286851.4c8dcef6eef6c6ffe6eec4eeefe6c98b.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733592286917"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733592286917"}]},"ts":"1733592286917"} 2024-12-07T17:24:46,922 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-07T17:24:46,923 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T17:24:46,923 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592286923"}]},"ts":"1733592286923"} 2024-12-07T17:24:46,925 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-12-07T17:24:46,929 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(202): Hosts are {5c70517e8267=0} racks are {/default-rack=0} 2024-12-07T17:24:46,930 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-07T17:24:46,930 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-07T17:24:46,930 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-07T17:24:46,930 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-07T17:24:46,930 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-07T17:24:46,930 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-07T17:24:46,930 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T17:24:46,930 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=7e007c78d4bbd37599fc2cd27d69bfde, ASSIGN}, {pid=101, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=4c8dcef6eef6c6ffe6eec4eeefe6c98b, ASSIGN}] 2024-12-07T17:24:46,931 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): 
Took xlock for pid=101, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=4c8dcef6eef6c6ffe6eec4eeefe6c98b, ASSIGN 2024-12-07T17:24:46,932 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=100, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=7e007c78d4bbd37599fc2cd27d69bfde, ASSIGN 2024-12-07T17:24:46,932 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=100, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=7e007c78d4bbd37599fc2cd27d69bfde, ASSIGN; state=OFFLINE, location=5c70517e8267,36163,1733592165482; forceNewPlan=false, retain=false 2024-12-07T17:24:46,932 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=101, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=4c8dcef6eef6c6ffe6eec4eeefe6c98b, ASSIGN; state=OFFLINE, location=5c70517e8267,38423,1733592165361; forceNewPlan=false, retain=false 2024-12-07T17:24:46,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-07T17:24:47,083 INFO [5c70517e8267:36895 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-07T17:24:47,084 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=4c8dcef6eef6c6ffe6eec4eeefe6c98b, regionState=OPENING, regionLocation=5c70517e8267,38423,1733592165361 2024-12-07T17:24:47,084 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=100 updating hbase:meta row=7e007c78d4bbd37599fc2cd27d69bfde, regionState=OPENING, regionLocation=5c70517e8267,36163,1733592165482 2024-12-07T17:24:47,085 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE; OpenRegionProcedure 4c8dcef6eef6c6ffe6eec4eeefe6c98b, server=5c70517e8267,38423,1733592165361}] 2024-12-07T17:24:47,087 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=100, state=RUNNABLE; OpenRegionProcedure 7e007c78d4bbd37599fc2cd27d69bfde, server=5c70517e8267,36163,1733592165482}] 2024-12-07T17:24:47,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-07T17:24:47,239 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5c70517e8267,38423,1733592165361 2024-12-07T17:24:47,239 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5c70517e8267,36163,1733592165482 2024-12-07T17:24:47,243 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion,1,1733592286851.4c8dcef6eef6c6ffe6eec4eeefe6c98b. 
2024-12-07T17:24:47,243 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7285): Opening region: {ENCODED => 4c8dcef6eef6c6ffe6eec4eeefe6c98b, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733592286851.4c8dcef6eef6c6ffe6eec4eeefe6c98b.', STARTKEY => '1', ENDKEY => ''} 2024-12-07T17:24:47,243 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1733592286851.4c8dcef6eef6c6ffe6eec4eeefe6c98b. service=AccessControlService 2024-12-07T17:24:47,244 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion,,1733592286851.7e007c78d4bbd37599fc2cd27d69bfde. 2024-12-07T17:24:47,244 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T17:24:47,244 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7285): Opening region: {ENCODED => 7e007c78d4bbd37599fc2cd27d69bfde, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733592286851.7e007c78d4bbd37599fc2cd27d69bfde.', STARTKEY => '', ENDKEY => '1'} 2024-12-07T17:24:47,244 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 4c8dcef6eef6c6ffe6eec4eeefe6c98b 2024-12-07T17:24:47,244 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1733592286851.4c8dcef6eef6c6ffe6eec4eeefe6c98b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:24:47,244 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7327): checking encryption for 4c8dcef6eef6c6ffe6eec4eeefe6c98b 2024-12-07T17:24:47,244 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1733592286851.7e007c78d4bbd37599fc2cd27d69bfde. service=AccessControlService 2024-12-07T17:24:47,244 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7330): checking classloading for 4c8dcef6eef6c6ffe6eec4eeefe6c98b 2024-12-07T17:24:47,244 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-07T17:24:47,245 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 7e007c78d4bbd37599fc2cd27d69bfde 2024-12-07T17:24:47,245 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1733592286851.7e007c78d4bbd37599fc2cd27d69bfde.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:24:47,245 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7327): checking encryption for 7e007c78d4bbd37599fc2cd27d69bfde 2024-12-07T17:24:47,245 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7330): checking classloading for 7e007c78d4bbd37599fc2cd27d69bfde 2024-12-07T17:24:47,246 INFO [StoreOpener-4c8dcef6eef6c6ffe6eec4eeefe6c98b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4c8dcef6eef6c6ffe6eec4eeefe6c98b 2024-12-07T17:24:47,247 INFO [StoreOpener-7e007c78d4bbd37599fc2cd27d69bfde-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 7e007c78d4bbd37599fc2cd27d69bfde 2024-12-07T17:24:47,247 INFO [StoreOpener-4c8dcef6eef6c6ffe6eec4eeefe6c98b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4c8dcef6eef6c6ffe6eec4eeefe6c98b columnFamilyName cf 2024-12-07T17:24:47,248 DEBUG [StoreOpener-4c8dcef6eef6c6ffe6eec4eeefe6c98b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:24:47,248 INFO [StoreOpener-7e007c78d4bbd37599fc2cd27d69bfde-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7e007c78d4bbd37599fc2cd27d69bfde columnFamilyName cf 2024-12-07T17:24:47,249 DEBUG [StoreOpener-7e007c78d4bbd37599fc2cd27d69bfde-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:24:47,249 INFO [StoreOpener-4c8dcef6eef6c6ffe6eec4eeefe6c98b-1 {}] regionserver.HStore(327): Store=4c8dcef6eef6c6ffe6eec4eeefe6c98b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:24:47,249 INFO [StoreOpener-7e007c78d4bbd37599fc2cd27d69bfde-1 {}] regionserver.HStore(327): Store=7e007c78d4bbd37599fc2cd27d69bfde/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:24:47,250 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion/7e007c78d4bbd37599fc2cd27d69bfde 2024-12-07T17:24:47,250 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion/4c8dcef6eef6c6ffe6eec4eeefe6c98b 2024-12-07T17:24:47,250 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion/7e007c78d4bbd37599fc2cd27d69bfde 2024-12-07T17:24:47,250 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion/4c8dcef6eef6c6ffe6eec4eeefe6c98b 2024-12-07T17:24:47,253 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1085): writing seq id for 7e007c78d4bbd37599fc2cd27d69bfde 2024-12-07T17:24:47,253 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1085): writing seq id for 4c8dcef6eef6c6ffe6eec4eeefe6c98b 2024-12-07T17:24:47,255 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion/4c8dcef6eef6c6ffe6eec4eeefe6c98b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T17:24:47,255 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion/7e007c78d4bbd37599fc2cd27d69bfde/recovered.edits/1.seqid, newMaxSeqId=1, 
maxSeqId=-1 2024-12-07T17:24:47,255 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1102): Opened 4c8dcef6eef6c6ffe6eec4eeefe6c98b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65695944, jitterRate=-0.02105414867401123}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T17:24:47,256 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1102): Opened 7e007c78d4bbd37599fc2cd27d69bfde; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66475104, jitterRate=-0.00944375991821289}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T17:24:47,256 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1001): Region open journal for 7e007c78d4bbd37599fc2cd27d69bfde: 2024-12-07T17:24:47,256 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1001): Region open journal for 4c8dcef6eef6c6ffe6eec4eeefe6c98b: 2024-12-07T17:24:47,257 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1733592286851.4c8dcef6eef6c6ffe6eec4eeefe6c98b., pid=102, masterSystemTime=1733592287239 2024-12-07T17:24:47,257 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1733592286851.7e007c78d4bbd37599fc2cd27d69bfde., pid=103, masterSystemTime=1733592287239 2024-12-07T17:24:47,259 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1733592286851.7e007c78d4bbd37599fc2cd27d69bfde. 2024-12-07T17:24:47,259 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion,,1733592286851.7e007c78d4bbd37599fc2cd27d69bfde. 2024-12-07T17:24:47,259 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=100 updating hbase:meta row=7e007c78d4bbd37599fc2cd27d69bfde, regionState=OPEN, openSeqNum=2, regionLocation=5c70517e8267,36163,1733592165482 2024-12-07T17:24:47,259 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1733592286851.4c8dcef6eef6c6ffe6eec4eeefe6c98b. 2024-12-07T17:24:47,259 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1733592286851.4c8dcef6eef6c6ffe6eec4eeefe6c98b. 
2024-12-07T17:24:47,260 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=4c8dcef6eef6c6ffe6eec4eeefe6c98b, regionState=OPEN, openSeqNum=2, regionLocation=5c70517e8267,38423,1733592165361 2024-12-07T17:24:47,263 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=100 2024-12-07T17:24:47,263 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=100, state=SUCCESS; OpenRegionProcedure 7e007c78d4bbd37599fc2cd27d69bfde, server=5c70517e8267,36163,1733592165482 in 175 msec 2024-12-07T17:24:47,264 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=101 2024-12-07T17:24:47,264 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=101, state=SUCCESS; OpenRegionProcedure 4c8dcef6eef6c6ffe6eec4eeefe6c98b, server=5c70517e8267,38423,1733592165361 in 177 msec 2024-12-07T17:24:47,264 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=7e007c78d4bbd37599fc2cd27d69bfde, ASSIGN in 333 msec 2024-12-07T17:24:47,266 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=99 2024-12-07T17:24:47,266 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=99, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=4c8dcef6eef6c6ffe6eec4eeefe6c98b, ASSIGN in 334 msec 2024-12-07T17:24:47,267 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T17:24:47,268 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592287268"}]},"ts":"1733592287268"} 2024-12-07T17:24:47,269 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-12-07T17:24:47,273 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T17:24:47,274 DEBUG [PEWorker-5 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 2024-12-07T17:24:47,276 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36163 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-07T17:24:47,278 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:24:47,278 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:24:47,278 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:24:47,278 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:24:47,280 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-07T17:24:47,280 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-07T17:24:47,281 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-07T17:24:47,281 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-07T17:24:47,285 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 429 msec 2024-12-07T17:24:47,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-07T17:24:47,462 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 99 completed 2024-12-07T17:24:47,462 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportFileSystemStateWithMergeRegion get assigned. Timeout = 60000ms 2024-12-07T17:24:47,462 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:24:47,479 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned to meta. Checking AM states. 2024-12-07T17:24:47,479 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:24:47,480 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned. 
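[editor's note] The entries above trace a CreateTableProcedure (pid=99) through region assignment, ENABLED state in hbase:meta, and the ACL grant for user jenkins. As a minimal sketch only (not the test's actual code; class and variable names are illustrative, connection setup is assumed), creating a table shaped like this one — a single column family "cf", pre-split at row key '1' so that two regions with the STARTKEY/ENDKEY pairs seen in the region-open entries are produced — would look roughly like:

    // Sketch: create a pre-split table via the HBase client Admin API.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreatePreSplitTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // One split key '1' yields two regions: [,'1') and ['1',), matching the
          // STARTKEY/ENDKEY pairs visible in the region-open log entries above.
          byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
          admin.createTable(
              TableDescriptorBuilder.newBuilder(table)
                  .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
                  .build(),
              splitKeys);
          // createTable blocks until the CreateTableProcedure (pid=99 above) completes.
        }
      }
    }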
2024-12-07T17:24:47,485 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-07T17:24:47,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733592287485 (current time:1733592287485). 2024-12-07T17:24:47,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T17:24:47,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-07T17:24:47,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T17:24:47,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0ca552cf to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@454e6692 2024-12-07T17:24:47,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@560a8cc6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:24:47,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:24:47,497 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53786, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:24:47,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0ca552cf to 127.0.0.1:58133 2024-12-07T17:24:47,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:24:47,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c2f3508 to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@ae3bee9 2024-12-07T17:24:47,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@80d7fcf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:24:47,528 DEBUG [hconnection-0x4301d69b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:24:47,529 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53802, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-12-07T17:24:47,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:24:47,533 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59972, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:24:47,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c2f3508 to 127.0.0.1:58133 2024-12-07T17:24:47,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:24:47,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-07T17:24:47,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-07T17:24:47,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-07T17:24:47,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-07T17:24:47,538 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T17:24:47,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-07T17:24:47,539 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T17:24:47,541 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T17:24:47,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742102_1278 (size=215) 2024-12-07T17:24:47,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742102_1278 (size=215) 2024-12-07T17:24:47,560 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742102_1278 (size=215) 2024-12-07T17:24:47,562 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T17:24:47,563 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 7e007c78d4bbd37599fc2cd27d69bfde}, {pid=106, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 4c8dcef6eef6c6ffe6eec4eeefe6c98b}] 2024-12-07T17:24:47,564 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=106, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 4c8dcef6eef6c6ffe6eec4eeefe6c98b 2024-12-07T17:24:47,564 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=105, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 7e007c78d4bbd37599fc2cd27d69bfde 2024-12-07T17:24:47,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-07T17:24:47,641 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-12-07T17:24:47,715 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5c70517e8267,36163,1733592165482 2024-12-07T17:24:47,715 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5c70517e8267,38423,1733592165361 2024-12-07T17:24:47,716 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36163 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=105 2024-12-07T17:24:47,716 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38423 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=106 2024-12-07T17:24:47,716 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733592286851.7e007c78d4bbd37599fc2cd27d69bfde. 2024-12-07T17:24:47,717 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733592286851.4c8dcef6eef6c6ffe6eec4eeefe6c98b. 
2024-12-07T17:24:47,717 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for 7e007c78d4bbd37599fc2cd27d69bfde: 2024-12-07T17:24:47,717 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2538): Flush status journal for 4c8dcef6eef6c6ffe6eec4eeefe6c98b: 2024-12-07T17:24:47,717 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1733592286851.7e007c78d4bbd37599fc2cd27d69bfde. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-07T17:24:47,717 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1733592286851.4c8dcef6eef6c6ffe6eec4eeefe6c98b. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-07T17:24:47,717 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1733592286851.7e007c78d4bbd37599fc2cd27d69bfde.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-07T17:24:47,717 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1733592286851.4c8dcef6eef6c6ffe6eec4eeefe6c98b.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-07T17:24:47,717 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:24:47,717 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:24:47,717 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T17:24:47,717 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T17:24:47,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742103_1279 (size=86) 2024-12-07T17:24:47,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742103_1279 (size=86) 2024-12-07T17:24:47,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742103_1279 (size=86) 2024-12-07T17:24:47,764 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733592286851.4c8dcef6eef6c6ffe6eec4eeefe6c98b. 
2024-12-07T17:24:47,764 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=106 2024-12-07T17:24:47,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=106 2024-12-07T17:24:47,764 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 4c8dcef6eef6c6ffe6eec4eeefe6c98b 2024-12-07T17:24:47,765 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=106, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 4c8dcef6eef6c6ffe6eec4eeefe6c98b 2024-12-07T17:24:47,768 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, ppid=104, state=SUCCESS; SnapshotRegionProcedure 4c8dcef6eef6c6ffe6eec4eeefe6c98b in 203 msec 2024-12-07T17:24:47,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742104_1280 (size=86) 2024-12-07T17:24:47,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742104_1280 (size=86) 2024-12-07T17:24:47,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742104_1280 (size=86) 2024-12-07T17:24:47,780 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733592286851.7e007c78d4bbd37599fc2cd27d69bfde. 
2024-12-07T17:24:47,780 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-12-07T17:24:47,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=105 2024-12-07T17:24:47,781 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 7e007c78d4bbd37599fc2cd27d69bfde 2024-12-07T17:24:47,781 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=105, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 7e007c78d4bbd37599fc2cd27d69bfde 2024-12-07T17:24:47,784 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-12-07T17:24:47,784 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; SnapshotRegionProcedure 7e007c78d4bbd37599fc2cd27d69bfde in 219 msec 2024-12-07T17:24:47,785 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T17:24:47,792 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T17:24:47,793 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T17:24:47,793 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-07T17:24:47,794 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-07T17:24:47,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742105_1281 (size=597) 2024-12-07T17:24:47,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742105_1281 (size=597) 2024-12-07T17:24:47,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742105_1281 (size=597) 2024-12-07T17:24:47,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-07T17:24:47,842 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, 
state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T17:24:47,849 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T17:24:47,849 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-07T17:24:47,851 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T17:24:47,851 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-07T17:24:47,853 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 316 msec 2024-12-07T17:24:48,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-07T17:24:48,143 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 104 completed 2024-12-07T17:24:48,163 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36163 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1733592286851.7e007c78d4bbd37599fc2cd27d69bfde. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T17:24:48,167 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38423 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1733592286851.4c8dcef6eef6c6ffe6eec4eeefe6c98b. with WAL disabled. Data may be lost in the event of a crash. 
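[editor's note] The entries above show a FLUSH-type snapshot (emptySnaptb0-...) driven through the SnapshotProcedure state machine (SNAPSHOT_PREPARE → SNAPSHOT_PRE_OPERATION → ... → SNAPSHOT_COMPLETE_SNAPSHOT, pid=104), followed by rows written with the WAL disabled. A minimal sketch, assuming an already-open Connection and illustrative row/value names, of how a client triggers the same kind of flush snapshot and issues SKIP_WAL writes:

    // Sketch: take a FLUSH snapshot and write with the WAL skipped (assumptions noted above).
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.SnapshotType;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SnapshotAndSkipWalWrite {
      static void run(Connection conn) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
        try (Admin admin = conn.getAdmin()) {
          // Synchronous FLUSH snapshot; the master runs the SnapshotProcedure seen above.
          admin.snapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion", table,
              SnapshotType.FLUSH);
        }
        try (Table t = conn.getTable(table)) {
          Put put = new Put(Bytes.toBytes("row-1"));
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          // SKIP_WAL is what produces the "writing data ... with WAL disabled" warning above.
          put.setDurability(Durability.SKIP_WAL);
          t.put(put);
        }
      }
    }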
2024-12-07T17:24:48,168 DEBUG [master/5c70517e8267:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 7e007c78d4bbd37599fc2cd27d69bfde changed from -1.0 to 0.0, refreshing cache 2024-12-07T17:24:48,168 DEBUG [master/5c70517e8267:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 4c8dcef6eef6c6ffe6eec4eeefe6c98b changed from -1.0 to 0.0, refreshing cache 2024-12-07T17:24:48,179 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T17:24:48,179 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1733592286851.7e007c78d4bbd37599fc2cd27d69bfde. 2024-12-07T17:24:48,179 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:24:48,213 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-07T17:24:48,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733592288213 (current time:1733592288213). 2024-12-07T17:24:48,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T17:24:48,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-07T17:24:48,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T17:24:48,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72d028a1 to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3f288d6e 2024-12-07T17:24:48,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20902111, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:24:48,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:24:48,236 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53812, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:24:48,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72d028a1 to 127.0.0.1:58133 2024-12-07T17:24:48,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:24:48,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x44d372e3 to 127.0.0.1:58133 with session 
timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3f6bc4ea 2024-12-07T17:24:48,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ed2e2bf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:24:48,250 DEBUG [hconnection-0x7722900-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:24:48,251 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53818, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:24:48,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:24:48,258 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59980, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:24:48,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x44d372e3 to 127.0.0.1:58133 2024-12-07T17:24:48,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:24:48,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-07T17:24:48,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
2024-12-07T17:24:48,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=107, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-07T17:24:48,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 107 2024-12-07T17:24:48,264 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T17:24:48,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-07T17:24:48,265 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T17:24:48,268 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T17:24:48,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742106_1282 (size=210) 2024-12-07T17:24:48,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742106_1282 (size=210) 2024-12-07T17:24:48,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742106_1282 (size=210) 2024-12-07T17:24:48,312 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T17:24:48,313 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 7e007c78d4bbd37599fc2cd27d69bfde}, {pid=109, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 4c8dcef6eef6c6ffe6eec4eeefe6c98b}] 2024-12-07T17:24:48,314 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=109, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 4c8dcef6eef6c6ffe6eec4eeefe6c98b 2024-12-07T17:24:48,314 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=108, ppid=107, state=RUNNABLE; 
SnapshotRegionProcedure 7e007c78d4bbd37599fc2cd27d69bfde 2024-12-07T17:24:48,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-07T17:24:48,465 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5c70517e8267,38423,1733592165361 2024-12-07T17:24:48,465 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5c70517e8267,36163,1733592165482 2024-12-07T17:24:48,466 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36163 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=108 2024-12-07T17:24:48,466 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38423 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=109 2024-12-07T17:24:48,466 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733592286851.7e007c78d4bbd37599fc2cd27d69bfde. 2024-12-07T17:24:48,466 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733592286851.4c8dcef6eef6c6ffe6eec4eeefe6c98b. 2024-12-07T17:24:48,466 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegion(2837): Flushing 7e007c78d4bbd37599fc2cd27d69bfde 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-07T17:24:48,466 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 4c8dcef6eef6c6ffe6eec4eeefe6c98b 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-07T17:24:48,489 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion/7e007c78d4bbd37599fc2cd27d69bfde/.tmp/cf/910ffb65130c4d08bbfd48eb9c39924a is 71, key is 021674c0be00d6a072c42407bb1f78c5/cf:q/1733592288163/Put/seqid=0 2024-12-07T17:24:48,493 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion/4c8dcef6eef6c6ffe6eec4eeefe6c98b/.tmp/cf/f51a36416d2a4b33ae5dc9b6dd78dd59 is 71, key is 10ca98de3405d4c71104c192591af4eb/cf:q/1733592288167/Put/seqid=0 2024-12-07T17:24:48,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742107_1283 (size=5216) 2024-12-07T17:24:48,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742107_1283 (size=5216) 2024-12-07T17:24:48,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742107_1283 
(size=5216) 2024-12-07T17:24:48,519 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion/7e007c78d4bbd37599fc2cd27d69bfde/.tmp/cf/910ffb65130c4d08bbfd48eb9c39924a 2024-12-07T17:24:48,528 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion/7e007c78d4bbd37599fc2cd27d69bfde/.tmp/cf/910ffb65130c4d08bbfd48eb9c39924a as hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion/7e007c78d4bbd37599fc2cd27d69bfde/cf/910ffb65130c4d08bbfd48eb9c39924a 2024-12-07T17:24:48,535 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion/7e007c78d4bbd37599fc2cd27d69bfde/cf/910ffb65130c4d08bbfd48eb9c39924a, entries=2, sequenceid=6, filesize=5.1 K 2024-12-07T17:24:48,537 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegion(3040): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 7e007c78d4bbd37599fc2cd27d69bfde in 70ms, sequenceid=6, compaction requested=false 2024-12-07T17:24:48,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegion(2538): Flush status journal for 7e007c78d4bbd37599fc2cd27d69bfde: 2024-12-07T17:24:48,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1733592286851.7e007c78d4bbd37599fc2cd27d69bfde. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-07T17:24:48,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1733592286851.7e007c78d4bbd37599fc2cd27d69bfde.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-07T17:24:48,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:24:48,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion/7e007c78d4bbd37599fc2cd27d69bfde/cf/910ffb65130c4d08bbfd48eb9c39924a] hfiles 2024-12-07T17:24:48,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion/7e007c78d4bbd37599fc2cd27d69bfde/cf/910ffb65130c4d08bbfd48eb9c39924a for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-07T17:24:48,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742108_1284 (size=8392) 2024-12-07T17:24:48,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742108_1284 (size=8392) 2024-12-07T17:24:48,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742108_1284 (size=8392) 2024-12-07T17:24:48,559 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion/4c8dcef6eef6c6ffe6eec4eeefe6c98b/.tmp/cf/f51a36416d2a4b33ae5dc9b6dd78dd59 2024-12-07T17:24:48,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-07T17:24:48,569 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion/4c8dcef6eef6c6ffe6eec4eeefe6c98b/.tmp/cf/f51a36416d2a4b33ae5dc9b6dd78dd59 as hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion/4c8dcef6eef6c6ffe6eec4eeefe6c98b/cf/f51a36416d2a4b33ae5dc9b6dd78dd59 2024-12-07T17:24:48,577 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion/4c8dcef6eef6c6ffe6eec4eeefe6c98b/cf/f51a36416d2a4b33ae5dc9b6dd78dd59, entries=48, sequenceid=6, filesize=8.2 K 2024-12-07T17:24:48,578 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 
KB/7152, currentSize=0 B/0 for 4c8dcef6eef6c6ffe6eec4eeefe6c98b in 112ms, sequenceid=6, compaction requested=false 2024-12-07T17:24:48,578 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 4c8dcef6eef6c6ffe6eec4eeefe6c98b: 2024-12-07T17:24:48,578 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1733592286851.4c8dcef6eef6c6ffe6eec4eeefe6c98b. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-07T17:24:48,579 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1733592286851.4c8dcef6eef6c6ffe6eec4eeefe6c98b.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-07T17:24:48,579 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:24:48,579 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion/4c8dcef6eef6c6ffe6eec4eeefe6c98b/cf/f51a36416d2a4b33ae5dc9b6dd78dd59] hfiles 2024-12-07T17:24:48,579 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion/4c8dcef6eef6c6ffe6eec4eeefe6c98b/cf/f51a36416d2a4b33ae5dc9b6dd78dd59 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-07T17:24:48,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742109_1285 (size=125) 2024-12-07T17:24:48,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742109_1285 (size=125) 2024-12-07T17:24:48,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742109_1285 (size=125) 2024-12-07T17:24:48,589 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733592286851.7e007c78d4bbd37599fc2cd27d69bfde. 
2024-12-07T17:24:48,589 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=108 2024-12-07T17:24:48,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=108 2024-12-07T17:24:48,589 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 7e007c78d4bbd37599fc2cd27d69bfde 2024-12-07T17:24:48,590 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=108, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 7e007c78d4bbd37599fc2cd27d69bfde 2024-12-07T17:24:48,592 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, ppid=107, state=SUCCESS; SnapshotRegionProcedure 7e007c78d4bbd37599fc2cd27d69bfde in 277 msec 2024-12-07T17:24:48,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742110_1286 (size=125) 2024-12-07T17:24:48,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742110_1286 (size=125) 2024-12-07T17:24:48,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742110_1286 (size=125) 2024-12-07T17:24:48,609 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733592286851.4c8dcef6eef6c6ffe6eec4eeefe6c98b. 
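The entries above (pids 107-109) trace a FLUSH-type snapshot on the master and region servers: each region's memstore is flushed to an hfile, the SnapshotManifest records a reference to every flushed file, and the per-region SnapshotRegionProcedures report completion back to the parent SnapshotProcedure. A minimal client-side sketch of how such a snapshot is typically requested through the HBase Admin API follows; the snapshot and table names are taken from the log, while the connection setup and the test's actual invocation are assumptions not shown in this log.

// Hypothetical sketch, not the test's own code: requesting a FLUSH snapshot
// like snaptb0-testExportFileSystemStateWithMergeRegion via the Admin API.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // A FLUSH snapshot first flushes each region's memstore (the
      // DefaultStoreFlusher lines above), then records references to the
      // flushed hfiles in the snapshot manifest.
      admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"),
          SnapshotType.FLUSH);
    }
  }
}

admin.snapshot blocks until the master reports the procedure done, which is why the client thread in the log keeps polling "Checking to see if procedure is done pid=107" until the SnapshotProcedure finishes.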
2024-12-07T17:24:48,610 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-12-07T17:24:48,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-12-07T17:24:48,610 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 4c8dcef6eef6c6ffe6eec4eeefe6c98b 2024-12-07T17:24:48,610 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=109, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 4c8dcef6eef6c6ffe6eec4eeefe6c98b 2024-12-07T17:24:48,613 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=107 2024-12-07T17:24:48,613 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T17:24:48,613 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=107, state=SUCCESS; SnapshotRegionProcedure 4c8dcef6eef6c6ffe6eec4eeefe6c98b in 298 msec 2024-12-07T17:24:48,614 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T17:24:48,614 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T17:24:48,614 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-07T17:24:48,620 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-07T17:24:48,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742111_1287 (size=675) 2024-12-07T17:24:48,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742111_1287 (size=675) 2024-12-07T17:24:48,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742111_1287 (size=675) 2024-12-07T17:24:48,651 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion 
table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T17:24:48,656 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T17:24:48,657 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-07T17:24:48,658 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T17:24:48,658 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 107 2024-12-07T17:24:48,659 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 396 msec 2024-12-07T17:24:48,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-07T17:24:48,869 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 107 completed 2024-12-07T17:24:48,890 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T17:24:48,892 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59988, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T17:24:48,893 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36163 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-07T17:24:48,894 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T17:24:48,897 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53824, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T17:24:48,897 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38423 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-07T17:24:48,899 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T17:24:48,902 INFO [RS-EventLoopGroup-3-1 
{}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39774, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T17:24:48,902 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41313 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-07T17:24:48,904 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T17:24:48,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:24:48,907 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T17:24:48,907 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:24:48,907 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 110 2024-12-07T17:24:48,910 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T17:24:48,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-07T17:24:48,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742112_1288 (size=399) 2024-12-07T17:24:48,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742112_1288 (size=399) 2024-12-07T17:24:48,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742112_1288 (size=399) 2024-12-07T17:24:48,946 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 24d938a75155038367ebbbeae07f4dc1, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288904.24d938a75155038367ebbbeae07f4dc1.', STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:24:48,947 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => e0033acfb1eec404819d264c83c486c7, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1733592288904.e0033acfb1eec404819d264c83c486c7.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:24:48,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742113_1289 (size=85) 2024-12-07T17:24:48,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742113_1289 (size=85) 2024-12-07T17:24:48,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742113_1289 (size=85) 2024-12-07T17:24:48,966 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288904.24d938a75155038367ebbbeae07f4dc1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:24:48,966 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1681): Closing 24d938a75155038367ebbbeae07f4dc1, disabling compactions & flushes 2024-12-07T17:24:48,966 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288904.24d938a75155038367ebbbeae07f4dc1. 2024-12-07T17:24:48,966 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288904.24d938a75155038367ebbbeae07f4dc1. 2024-12-07T17:24:48,966 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288904.24d938a75155038367ebbbeae07f4dc1. after waiting 0 ms 2024-12-07T17:24:48,966 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288904.24d938a75155038367ebbbeae07f4dc1. 2024-12-07T17:24:48,966 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288904.24d938a75155038367ebbbeae07f4dc1. 
2024-12-07T17:24:48,966 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1635): Region close journal for 24d938a75155038367ebbbeae07f4dc1: 2024-12-07T17:24:48,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742114_1290 (size=85) 2024-12-07T17:24:48,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742114_1290 (size=85) 2024-12-07T17:24:48,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742114_1290 (size=85) 2024-12-07T17:24:48,974 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1733592288904.e0033acfb1eec404819d264c83c486c7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:24:48,975 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1681): Closing e0033acfb1eec404819d264c83c486c7, disabling compactions & flushes 2024-12-07T17:24:48,975 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733592288904.e0033acfb1eec404819d264c83c486c7. 2024-12-07T17:24:48,975 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733592288904.e0033acfb1eec404819d264c83c486c7. 2024-12-07T17:24:48,975 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733592288904.e0033acfb1eec404819d264c83c486c7. after waiting 0 ms 2024-12-07T17:24:48,975 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733592288904.e0033acfb1eec404819d264c83c486c7. 2024-12-07T17:24:48,975 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1733592288904.e0033acfb1eec404819d264c83c486c7. 
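The HMaster entry above records the client request that creates 'testtb-testExportFileSystemStateWithMergeRegion-1' with a single 'cf' family and two regions split at row key '2' (STARTKEY ''-'2' and '2'-''). A minimal sketch of the corresponding Admin API call is shown below; the table name, family name, and split key are taken from the logged descriptor, while the class name and how the Admin handle is obtained are illustrative assumptions.

// Hypothetical sketch of the create-table request logged above: one 'cf'
// family, max one version, ROW bloom filter, and a single split key '2'.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateMergeTableExample {
  static void createTable(Admin admin) throws java.io.IOException {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .build())
        .build();
    // The single split key produces the two regions initialized above.
    admin.createTable(desc, new byte[][] { Bytes.toBytes("2") });
  }
}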
2024-12-07T17:24:48,975 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1635): Region close journal for e0033acfb1eec404819d264c83c486c7: 2024-12-07T17:24:48,976 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T17:24:48,976 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288904.24d938a75155038367ebbbeae07f4dc1.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1733592288976"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733592288976"}]},"ts":"1733592288976"} 2024-12-07T17:24:48,976 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1733592288904.e0033acfb1eec404819d264c83c486c7.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1733592288976"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733592288976"}]},"ts":"1733592288976"} 2024-12-07T17:24:48,979 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-07T17:24:48,980 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T17:24:48,980 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592288980"}]},"ts":"1733592288980"} 2024-12-07T17:24:48,982 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-12-07T17:24:48,985 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(202): Hosts are {5c70517e8267=0} racks are {/default-rack=0} 2024-12-07T17:24:48,987 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-07T17:24:48,987 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-07T17:24:48,987 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-07T17:24:48,987 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-07T17:24:48,987 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-07T17:24:48,987 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-07T17:24:48,987 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T17:24:48,987 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=24d938a75155038367ebbbeae07f4dc1, ASSIGN}, {pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e0033acfb1eec404819d264c83c486c7, ASSIGN}] 2024-12-07T17:24:48,989 INFO [PEWorker-2 {}] 
procedure.MasterProcedureScheduler(786): Took xlock for pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=24d938a75155038367ebbbeae07f4dc1, ASSIGN 2024-12-07T17:24:48,989 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e0033acfb1eec404819d264c83c486c7, ASSIGN 2024-12-07T17:24:48,990 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=24d938a75155038367ebbbeae07f4dc1, ASSIGN; state=OFFLINE, location=5c70517e8267,41313,1733592165267; forceNewPlan=false, retain=false 2024-12-07T17:24:48,990 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e0033acfb1eec404819d264c83c486c7, ASSIGN; state=OFFLINE, location=5c70517e8267,36163,1733592165482; forceNewPlan=false, retain=false 2024-12-07T17:24:49,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-07T17:24:49,140 INFO [5c70517e8267:36895 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-07T17:24:49,141 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=111 updating hbase:meta row=24d938a75155038367ebbbeae07f4dc1, regionState=OPENING, regionLocation=5c70517e8267,41313,1733592165267 2024-12-07T17:24:49,141 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=112 updating hbase:meta row=e0033acfb1eec404819d264c83c486c7, regionState=OPENING, regionLocation=5c70517e8267,36163,1733592165482 2024-12-07T17:24:49,145 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=111, state=RUNNABLE; OpenRegionProcedure 24d938a75155038367ebbbeae07f4dc1, server=5c70517e8267,41313,1733592165267}] 2024-12-07T17:24:49,146 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=114, ppid=112, state=RUNNABLE; OpenRegionProcedure e0033acfb1eec404819d264c83c486c7, server=5c70517e8267,36163,1733592165482}] 2024-12-07T17:24:49,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-07T17:24:49,296 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5c70517e8267,41313,1733592165267 2024-12-07T17:24:49,298 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5c70517e8267,36163,1733592165482 2024-12-07T17:24:49,300 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288904.24d938a75155038367ebbbeae07f4dc1. 
2024-12-07T17:24:49,301 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7285): Opening region: {ENCODED => 24d938a75155038367ebbbeae07f4dc1, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288904.24d938a75155038367ebbbeae07f4dc1.', STARTKEY => '', ENDKEY => '2'} 2024-12-07T17:24:49,301 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288904.24d938a75155038367ebbbeae07f4dc1. service=AccessControlService 2024-12-07T17:24:49,301 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T17:24:49,302 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1733592288904.e0033acfb1eec404819d264c83c486c7. 2024-12-07T17:24:49,302 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 24d938a75155038367ebbbeae07f4dc1 2024-12-07T17:24:49,302 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288904.24d938a75155038367ebbbeae07f4dc1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:24:49,302 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7285): Opening region: {ENCODED => e0033acfb1eec404819d264c83c486c7, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1733592288904.e0033acfb1eec404819d264c83c486c7.', STARTKEY => '2', ENDKEY => ''} 2024-12-07T17:24:49,302 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7327): checking encryption for 24d938a75155038367ebbbeae07f4dc1 2024-12-07T17:24:49,302 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7330): checking classloading for 24d938a75155038367ebbbeae07f4dc1 2024-12-07T17:24:49,302 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1733592288904.e0033acfb1eec404819d264c83c486c7. service=AccessControlService 2024-12-07T17:24:49,302 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-07T17:24:49,303 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 e0033acfb1eec404819d264c83c486c7 2024-12-07T17:24:49,303 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1733592288904.e0033acfb1eec404819d264c83c486c7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:24:49,303 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7327): checking encryption for e0033acfb1eec404819d264c83c486c7 2024-12-07T17:24:49,303 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7330): checking classloading for e0033acfb1eec404819d264c83c486c7 2024-12-07T17:24:49,305 INFO [StoreOpener-e0033acfb1eec404819d264c83c486c7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e0033acfb1eec404819d264c83c486c7 2024-12-07T17:24:49,305 INFO [StoreOpener-24d938a75155038367ebbbeae07f4dc1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 24d938a75155038367ebbbeae07f4dc1 2024-12-07T17:24:49,307 INFO [StoreOpener-24d938a75155038367ebbbeae07f4dc1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 24d938a75155038367ebbbeae07f4dc1 columnFamilyName cf 2024-12-07T17:24:49,307 INFO [StoreOpener-e0033acfb1eec404819d264c83c486c7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e0033acfb1eec404819d264c83c486c7 columnFamilyName cf 2024-12-07T17:24:49,307 DEBUG [StoreOpener-24d938a75155038367ebbbeae07f4dc1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:24:49,307 DEBUG [StoreOpener-e0033acfb1eec404819d264c83c486c7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:24:49,307 INFO [StoreOpener-24d938a75155038367ebbbeae07f4dc1-1 {}] regionserver.HStore(327): Store=24d938a75155038367ebbbeae07f4dc1/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:24:49,307 INFO [StoreOpener-e0033acfb1eec404819d264c83c486c7-1 {}] regionserver.HStore(327): Store=e0033acfb1eec404819d264c83c486c7/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:24:49,308 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e0033acfb1eec404819d264c83c486c7 2024-12-07T17:24:49,308 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/24d938a75155038367ebbbeae07f4dc1 2024-12-07T17:24:49,309 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/24d938a75155038367ebbbeae07f4dc1 2024-12-07T17:24:49,309 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e0033acfb1eec404819d264c83c486c7 2024-12-07T17:24:49,312 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1085): writing seq id for 24d938a75155038367ebbbeae07f4dc1 2024-12-07T17:24:49,312 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(1085): writing seq id for e0033acfb1eec404819d264c83c486c7 2024-12-07T17:24:49,314 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/24d938a75155038367ebbbeae07f4dc1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T17:24:49,314 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e0033acfb1eec404819d264c83c486c7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T17:24:49,315 INFO 
[RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1102): Opened 24d938a75155038367ebbbeae07f4dc1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70905678, jitterRate=0.056576937437057495}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T17:24:49,315 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(1102): Opened e0033acfb1eec404819d264c83c486c7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66653531, jitterRate=-0.006784990429878235}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T17:24:49,316 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1001): Region open journal for 24d938a75155038367ebbbeae07f4dc1: 2024-12-07T17:24:49,316 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(1001): Region open journal for e0033acfb1eec404819d264c83c486c7: 2024-12-07T17:24:49,317 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288904.24d938a75155038367ebbbeae07f4dc1., pid=113, masterSystemTime=1733592289296 2024-12-07T17:24:49,317 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1733592288904.e0033acfb1eec404819d264c83c486c7., pid=114, masterSystemTime=1733592289298 2024-12-07T17:24:49,320 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288904.24d938a75155038367ebbbeae07f4dc1. 2024-12-07T17:24:49,320 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288904.24d938a75155038367ebbbeae07f4dc1. 2024-12-07T17:24:49,320 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=111 updating hbase:meta row=24d938a75155038367ebbbeae07f4dc1, regionState=OPEN, openSeqNum=2, regionLocation=5c70517e8267,41313,1733592165267 2024-12-07T17:24:49,320 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1733592288904.e0033acfb1eec404819d264c83c486c7. 2024-12-07T17:24:49,320 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1733592288904.e0033acfb1eec404819d264c83c486c7. 
2024-12-07T17:24:49,321 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=112 updating hbase:meta row=e0033acfb1eec404819d264c83c486c7, regionState=OPEN, openSeqNum=2, regionLocation=5c70517e8267,36163,1733592165482 2024-12-07T17:24:49,325 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=111 2024-12-07T17:24:49,326 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=114, resume processing ppid=112 2024-12-07T17:24:49,326 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=111, state=SUCCESS; OpenRegionProcedure 24d938a75155038367ebbbeae07f4dc1, server=5c70517e8267,41313,1733592165267 in 177 msec 2024-12-07T17:24:49,326 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, ppid=112, state=SUCCESS; OpenRegionProcedure e0033acfb1eec404819d264c83c486c7, server=5c70517e8267,36163,1733592165482 in 178 msec 2024-12-07T17:24:49,327 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=24d938a75155038367ebbbeae07f4dc1, ASSIGN in 338 msec 2024-12-07T17:24:49,328 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=112, resume processing ppid=110 2024-12-07T17:24:49,328 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, ppid=110, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e0033acfb1eec404819d264c83c486c7, ASSIGN in 339 msec 2024-12-07T17:24:49,329 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T17:24:49,329 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592289329"}]},"ts":"1733592289329"} 2024-12-07T17:24:49,330 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-12-07T17:24:49,332 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T17:24:49,333 DEBUG [PEWorker-4 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: RWXCA 2024-12-07T17:24:49,335 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36163 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-07T17:24:49,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:24:49,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:24:49,336 
DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:24:49,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:24:49,339 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-07T17:24:49,339 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-07T17:24:49,339 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-07T17:24:49,339 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-07T17:24:49,339 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-07T17:24:49,340 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-07T17:24:49,340 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-07T17:24:49,340 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-07T17:24:49,340 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 433 msec 2024-12-07T17:24:49,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-07T17:24:49,516 INFO [Time-limited test {}] 
client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 110 completed 2024-12-07T17:24:49,555 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster$2(2219): Client=jenkins//172.17.0.2 merge regions [24d938a75155038367ebbbeae07f4dc1, e0033acfb1eec404819d264c83c486c7] 2024-12-07T17:24:49,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[24d938a75155038367ebbbeae07f4dc1, e0033acfb1eec404819d264c83c486c7], force=true 2024-12-07T17:24:49,563 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[24d938a75155038367ebbbeae07f4dc1, e0033acfb1eec404819d264c83c486c7], force=true 2024-12-07T17:24:49,563 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[24d938a75155038367ebbbeae07f4dc1, e0033acfb1eec404819d264c83c486c7], force=true 2024-12-07T17:24:49,563 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[24d938a75155038367ebbbeae07f4dc1, e0033acfb1eec404819d264c83c486c7], force=true 2024-12-07T17:24:49,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-07T17:24:49,583 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=24d938a75155038367ebbbeae07f4dc1, UNASSIGN}, {pid=117, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e0033acfb1eec404819d264c83c486c7, UNASSIGN}] 2024-12-07T17:24:49,584 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=24d938a75155038367ebbbeae07f4dc1, UNASSIGN 2024-12-07T17:24:49,584 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=117, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e0033acfb1eec404819d264c83c486c7, UNASSIGN 2024-12-07T17:24:49,587 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=117 updating hbase:meta row=e0033acfb1eec404819d264c83c486c7, regionState=CLOSING, regionLocation=5c70517e8267,36163,1733592165482 2024-12-07T17:24:49,588 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=116 updating hbase:meta row=24d938a75155038367ebbbeae07f4dc1, regionState=CLOSING, regionLocation=5c70517e8267,41313,1733592165267 2024-12-07T17:24:49,590 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: true: 
evictOnSplit: true: evictOnClose: false 2024-12-07T17:24:49,590 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=118, ppid=117, state=RUNNABLE; CloseRegionProcedure e0033acfb1eec404819d264c83c486c7, server=5c70517e8267,36163,1733592165482}] 2024-12-07T17:24:49,591 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-07T17:24:49,591 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=116, state=RUNNABLE; CloseRegionProcedure 24d938a75155038367ebbbeae07f4dc1, server=5c70517e8267,41313,1733592165267}] 2024-12-07T17:24:49,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-07T17:24:49,743 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5c70517e8267,41313,1733592165267 2024-12-07T17:24:49,743 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5c70517e8267,36163,1733592165482 2024-12-07T17:24:49,743 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] handler.UnassignRegionHandler(124): Close e0033acfb1eec404819d264c83c486c7 2024-12-07T17:24:49,743 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(124): Close 24d938a75155038367ebbbeae07f4dc1 2024-12-07T17:24:49,743 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] handler.UnassignRegionHandler(138): Unassign region: split region: true: evictCache: true 2024-12-07T17:24:49,743 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(138): Unassign region: split region: true: evictCache: true 2024-12-07T17:24:49,744 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1681): Closing e0033acfb1eec404819d264c83c486c7, disabling compactions & flushes 2024-12-07T17:24:49,744 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1681): Closing 24d938a75155038367ebbbeae07f4dc1, disabling compactions & flushes 2024-12-07T17:24:49,744 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733592288904.e0033acfb1eec404819d264c83c486c7. 2024-12-07T17:24:49,744 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733592288904.e0033acfb1eec404819d264c83c486c7. 2024-12-07T17:24:49,744 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288904.24d938a75155038367ebbbeae07f4dc1. 2024-12-07T17:24:49,744 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733592288904.e0033acfb1eec404819d264c83c486c7. 
after waiting 0 ms 2024-12-07T17:24:49,744 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288904.24d938a75155038367ebbbeae07f4dc1. 2024-12-07T17:24:49,744 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733592288904.e0033acfb1eec404819d264c83c486c7. 2024-12-07T17:24:49,744 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288904.24d938a75155038367ebbbeae07f4dc1. after waiting 0 ms 2024-12-07T17:24:49,744 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288904.24d938a75155038367ebbbeae07f4dc1. 2024-12-07T17:24:49,744 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(2837): Flushing e0033acfb1eec404819d264c83c486c7 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-07T17:24:49,744 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(2837): Flushing 24d938a75155038367ebbbeae07f4dc1 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-07T17:24:49,761 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e0033acfb1eec404819d264c83c486c7/.tmp/cf/a03a2207c15842ee89c1756e755ec215 is 28, key is 2/cf:/1733592289535/Put/seqid=0 2024-12-07T17:24:49,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742115_1291 (size=4945) 2024-12-07T17:24:49,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742115_1291 (size=4945) 2024-12-07T17:24:49,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742115_1291 (size=4945) 2024-12-07T17:24:49,772 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e0033acfb1eec404819d264c83c486c7/.tmp/cf/a03a2207c15842ee89c1756e755ec215 2024-12-07T17:24:49,774 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/24d938a75155038367ebbbeae07f4dc1/.tmp/cf/2edb830c42934e868b56a6eb4b4dbf7a is 28, key is 1/cf:/1733592289520/Put/seqid=0 2024-12-07T17:24:49,778 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 
{event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e0033acfb1eec404819d264c83c486c7/.tmp/cf/a03a2207c15842ee89c1756e755ec215 as hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e0033acfb1eec404819d264c83c486c7/cf/a03a2207c15842ee89c1756e755ec215 2024-12-07T17:24:49,784 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e0033acfb1eec404819d264c83c486c7/cf/a03a2207c15842ee89c1756e755ec215, entries=1, sequenceid=5, filesize=4.8 K 2024-12-07T17:24:49,785 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(3040): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for e0033acfb1eec404819d264c83c486c7 in 41ms, sequenceid=5, compaction requested=false 2024-12-07T17:24:49,785 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-12-07T17:24:49,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742116_1292 (size=4945) 2024-12-07T17:24:49,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742116_1292 (size=4945) 2024-12-07T17:24:49,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742116_1292 (size=4945) 2024-12-07T17:24:49,795 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/24d938a75155038367ebbbeae07f4dc1/.tmp/cf/2edb830c42934e868b56a6eb4b4dbf7a 2024-12-07T17:24:49,797 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e0033acfb1eec404819d264c83c486c7/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-07T17:24:49,798 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:24:49,798 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1733592288904.e0033acfb1eec404819d264c83c486c7. 
2024-12-07T17:24:49,798 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1635): Region close journal for e0033acfb1eec404819d264c83c486c7: 2024-12-07T17:24:49,800 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] handler.UnassignRegionHandler(170): Closed e0033acfb1eec404819d264c83c486c7 2024-12-07T17:24:49,801 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=117 updating hbase:meta row=e0033acfb1eec404819d264c83c486c7, regionState=CLOSED 2024-12-07T17:24:49,802 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/24d938a75155038367ebbbeae07f4dc1/.tmp/cf/2edb830c42934e868b56a6eb4b4dbf7a as hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/24d938a75155038367ebbbeae07f4dc1/cf/2edb830c42934e868b56a6eb4b4dbf7a 2024-12-07T17:24:49,804 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=118, resume processing ppid=117 2024-12-07T17:24:49,804 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, ppid=117, state=SUCCESS; CloseRegionProcedure e0033acfb1eec404819d264c83c486c7, server=5c70517e8267,36163,1733592165482 in 213 msec 2024-12-07T17:24:49,805 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e0033acfb1eec404819d264c83c486c7, UNASSIGN in 221 msec 2024-12-07T17:24:49,808 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/24d938a75155038367ebbbeae07f4dc1/cf/2edb830c42934e868b56a6eb4b4dbf7a, entries=1, sequenceid=5, filesize=4.8 K 2024-12-07T17:24:49,809 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 24d938a75155038367ebbbeae07f4dc1 in 65ms, sequenceid=5, compaction requested=false 2024-12-07T17:24:49,815 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/24d938a75155038367ebbbeae07f4dc1/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-07T17:24:49,816 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:24:49,841 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288904.24d938a75155038367ebbbeae07f4dc1. 
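The CloseRegionProcedure/TransitRegionStateProcedure steps logged here (pids 116-119) are sub-procedures of the MergeTableRegionsProcedure with pid=115: each parent region is flushed, closed and unassigned before the merged region is created. From a client's point of view the whole merge is one Admin call. The following is a minimal, hypothetical sketch of issuing such a merge with the HBase 2.x client API; the encoded region names are copied from the log for illustration, and this is a sketch rather than the test's actual code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class MergeRegionsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Encoded names of the two parent regions, as they appear in the log;
      // a real client would look them up via admin.getRegions(tableName).
      byte[][] regionsToMerge = new byte[][] {
          Bytes.toBytes("24d938a75155038367ebbbeae07f4dc1"),
          Bytes.toBytes("e0033acfb1eec404819d264c83c486c7")
      };
      // force=true permits merging non-adjacent regions and matches the
      // "force=true" reported by MergeTableRegionsProcedure further down.
      admin.mergeRegionsAsync(regionsToMerge, true).get();
    }
  }
}

The returned Future completes when the master-side procedure (pid=115 here) finishes, which is what the repeated "Checking to see if procedure is done pid=115" RPCs in this log are polling for.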
2024-12-07T17:24:49,841 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1635): Region close journal for 24d938a75155038367ebbbeae07f4dc1: 2024-12-07T17:24:49,845 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(170): Closed 24d938a75155038367ebbbeae07f4dc1 2024-12-07T17:24:49,846 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=116 updating hbase:meta row=24d938a75155038367ebbbeae07f4dc1, regionState=CLOSED 2024-12-07T17:24:49,856 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=116 2024-12-07T17:24:49,856 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=116, state=SUCCESS; CloseRegionProcedure 24d938a75155038367ebbbeae07f4dc1, server=5c70517e8267,41313,1733592165267 in 263 msec 2024-12-07T17:24:49,857 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=116, resume processing ppid=115 2024-12-07T17:24:49,857 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=24d938a75155038367ebbbeae07f4dc1, UNASSIGN in 272 msec 2024-12-07T17:24:49,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-07T17:24:49,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742117_1293 (size=84) 2024-12-07T17:24:49,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742117_1293 (size=84) 2024-12-07T17:24:49,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742117_1293 (size=84) 2024-12-07T17:24:49,901 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:24:49,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742118_1294 (size=20) 2024-12-07T17:24:49,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742118_1294 (size=20) 2024-12-07T17:24:49,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742118_1294 (size=20) 2024-12-07T17:24:49,923 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:24:49,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742119_1295 (size=21) 2024-12-07T17:24:49,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742119_1295 (size=21) 2024-12-07T17:24:49,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742119_1295 (size=21) 2024-12-07T17:24:49,946 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742120_1296 (size=84) 2024-12-07T17:24:49,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742120_1296 (size=84) 2024-12-07T17:24:49,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742120_1296 (size=84) 2024-12-07T17:24:49,948 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:24:49,959 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4c981ac2f2269323a666f3595ec4f10b/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1 2024-12-07T17:24:49,962 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288904.24d938a75155038367ebbbeae07f4dc1.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-07T17:24:49,962 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1733592288904.e0033acfb1eec404819d264c83c486c7.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-07T17:24:49,962 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288905.4c981ac2f2269323a666f3595ec4f10b.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-07T17:24:50,001 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=120, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4c981ac2f2269323a666f3595ec4f10b, ASSIGN}] 2024-12-07T17:24:50,002 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=120, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4c981ac2f2269323a666f3595ec4f10b, ASSIGN 2024-12-07T17:24:50,002 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=120, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4c981ac2f2269323a666f3595ec4f10b, ASSIGN; state=MERGED, location=5c70517e8267,41313,1733592165267; forceNewPlan=false, retain=false 2024-12-07T17:24:50,153 INFO [5c70517e8267:36895 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 
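The meta edits above delete the hbase:meta rows of the two parent regions and write a single row for the merged region 4c981ac2f2269323a666f3595ec4f10b whose info family carries merge0000/merge0001 qualifiers (the serialized RegionInfo of the parents, retained until their store-file references are cleaned up). A hypothetical way to inspect those qualifiers from a client, sketched with the standard HBase API; the row key is copied from the Put above and the snippet is illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaMergeQualifierSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Row key of the merged region's hbase:meta entry, as printed in the Put above.
    byte[] row = Bytes.toBytes(
        "testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288905.4c981ac2f2269323a666f3595ec4f10b.");
    byte[] info = Bytes.toBytes("info");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
      Result r = meta.get(new Get(row).addFamily(info));
      // The Put above wrote regioninfo, merge0000, merge0001 and state.
      for (String q : new String[] { "regioninfo", "merge0000", "merge0001", "state" }) {
        System.out.println(q + " present: " + r.containsColumn(info, Bytes.toBytes(q)));
      }
    }
  }
}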
2024-12-07T17:24:50,153 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=120 updating hbase:meta row=4c981ac2f2269323a666f3595ec4f10b, regionState=OPENING, regionLocation=5c70517e8267,41313,1733592165267 2024-12-07T17:24:50,160 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; OpenRegionProcedure 4c981ac2f2269323a666f3595ec4f10b, server=5c70517e8267,41313,1733592165267}] 2024-12-07T17:24:50,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-07T17:24:50,321 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5c70517e8267,41313,1733592165267 2024-12-07T17:24:50,325 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288905.4c981ac2f2269323a666f3595ec4f10b. 2024-12-07T17:24:50,325 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7285): Opening region: {ENCODED => 4c981ac2f2269323a666f3595ec4f10b, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288905.4c981ac2f2269323a666f3595ec4f10b.', STARTKEY => '', ENDKEY => ''} 2024-12-07T17:24:50,326 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288905.4c981ac2f2269323a666f3595ec4f10b. service=AccessControlService 2024-12-07T17:24:50,326 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-07T17:24:50,326 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 4c981ac2f2269323a666f3595ec4f10b 2024-12-07T17:24:50,326 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288905.4c981ac2f2269323a666f3595ec4f10b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:24:50,326 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7327): checking encryption for 4c981ac2f2269323a666f3595ec4f10b 2024-12-07T17:24:50,326 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7330): checking classloading for 4c981ac2f2269323a666f3595ec4f10b 2024-12-07T17:24:50,327 INFO [StoreOpener-4c981ac2f2269323a666f3595ec4f10b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4c981ac2f2269323a666f3595ec4f10b 2024-12-07T17:24:50,328 INFO [StoreOpener-4c981ac2f2269323a666f3595ec4f10b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4c981ac2f2269323a666f3595ec4f10b columnFamilyName cf 2024-12-07T17:24:50,329 DEBUG [StoreOpener-4c981ac2f2269323a666f3595ec4f10b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:24:50,350 DEBUG [StoreOpener-4c981ac2f2269323a666f3595ec4f10b-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4c981ac2f2269323a666f3595ec4f10b/cf/2edb830c42934e868b56a6eb4b4dbf7a.24d938a75155038367ebbbeae07f4dc1->hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/24d938a75155038367ebbbeae07f4dc1/cf/2edb830c42934e868b56a6eb4b4dbf7a-top 2024-12-07T17:24:50,356 DEBUG [StoreOpener-4c981ac2f2269323a666f3595ec4f10b-1 {}] regionserver.StoreEngine(277): loaded 
hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4c981ac2f2269323a666f3595ec4f10b/cf/a03a2207c15842ee89c1756e755ec215.e0033acfb1eec404819d264c83c486c7->hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e0033acfb1eec404819d264c83c486c7/cf/a03a2207c15842ee89c1756e755ec215-top 2024-12-07T17:24:50,357 INFO [StoreOpener-4c981ac2f2269323a666f3595ec4f10b-1 {}] regionserver.HStore(327): Store=4c981ac2f2269323a666f3595ec4f10b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:24:50,357 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4c981ac2f2269323a666f3595ec4f10b 2024-12-07T17:24:50,358 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4c981ac2f2269323a666f3595ec4f10b 2024-12-07T17:24:50,361 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1085): writing seq id for 4c981ac2f2269323a666f3595ec4f10b 2024-12-07T17:24:50,362 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1102): Opened 4c981ac2f2269323a666f3595ec4f10b; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69592086, jitterRate=0.037002891302108765}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T17:24:50,362 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1001): Region open journal for 4c981ac2f2269323a666f3595ec4f10b: 2024-12-07T17:24:50,363 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288905.4c981ac2f2269323a666f3595ec4f10b., pid=121, masterSystemTime=1733592290321 2024-12-07T17:24:50,363 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288905.4c981ac2f2269323a666f3595ec4f10b.,because compaction is disabled. 2024-12-07T17:24:50,365 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288905.4c981ac2f2269323a666f3595ec4f10b. 2024-12-07T17:24:50,365 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288905.4c981ac2f2269323a666f3595ec4f10b. 
2024-12-07T17:24:50,366 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=120 updating hbase:meta row=4c981ac2f2269323a666f3595ec4f10b, regionState=OPEN, openSeqNum=9, regionLocation=5c70517e8267,41313,1733592165267 2024-12-07T17:24:50,368 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-12-07T17:24:50,369 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; OpenRegionProcedure 4c981ac2f2269323a666f3595ec4f10b, server=5c70517e8267,41313,1733592165267 in 207 msec 2024-12-07T17:24:50,370 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=120, resume processing ppid=115 2024-12-07T17:24:50,370 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4c981ac2f2269323a666f3595ec4f10b, ASSIGN in 369 msec 2024-12-07T17:24:50,371 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, state=SUCCESS; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[24d938a75155038367ebbbeae07f4dc1, e0033acfb1eec404819d264c83c486c7], force=true in 813 msec 2024-12-07T17:24:50,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-07T17:24:50,672 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 115 completed 2024-12-07T17:24:50,672 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-07T17:24:50,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733592290672 (current time:1733592290672). 
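The snapshot request logged above ({ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 ... type=FLUSH ttl=0 }) arrives through MasterRpcServices and is executed as SnapshotProcedure pid=122 below. On the client side it is a single blocking Admin call; a minimal sketch with the HBase 2.x API, reusing the names from the log (illustrative, not the test's code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // type=FLUSH matches the snapshot description printed in the log.
      admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"),
          SnapshotType.FLUSH);
    }
  }
}

admin.snapshot(...) blocks until the snapshot is complete, which in this log corresponds to the procedure running through SNAPSHOT_COMPLETE_SNAPSHOT and finishing as "Finished pid=122".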
2024-12-07T17:24:50,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T17:24:50,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2 2024-12-07T17:24:50,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T17:24:50,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x58fe489f to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@340d6ba3 2024-12-07T17:24:50,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@479c94, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:24:50,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:24:50,679 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53836, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:24:50,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x58fe489f to 127.0.0.1:58133 2024-12-07T17:24:50,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:24:50,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3bb2bf92 to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@643d8b34 2024-12-07T17:24:50,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58a2181b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:24:50,686 DEBUG [hconnection-0x5cb8f279-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:24:50,687 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53844, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:24:50,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:24:50,690 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59992, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:24:50,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close 
zookeeper connection 0x3bb2bf92 to 127.0.0.1:58133 2024-12-07T17:24:50,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:24:50,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-07T17:24:50,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-07T17:24:50,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-07T17:24:50,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-07T17:24:50,694 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T17:24:50,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-07T17:24:50,695 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T17:24:50,697 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T17:24:50,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742121_1297 (size=216) 2024-12-07T17:24:50,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742121_1297 (size=216) 2024-12-07T17:24:50,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742121_1297 (size=216) 2024-12-07T17:24:50,704 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T17:24:50,704 INFO [PEWorker-3 
{}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; SnapshotRegionProcedure 4c981ac2f2269323a666f3595ec4f10b}] 2024-12-07T17:24:50,705 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=123, ppid=122, state=RUNNABLE; SnapshotRegionProcedure 4c981ac2f2269323a666f3595ec4f10b 2024-12-07T17:24:50,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-07T17:24:50,860 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5c70517e8267,41313,1733592165267 2024-12-07T17:24:50,861 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41313 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=123 2024-12-07T17:24:50,861 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288905.4c981ac2f2269323a666f3595ec4f10b. 2024-12-07T17:24:50,862 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2538): Flush status journal for 4c981ac2f2269323a666f3595ec4f10b: 2024-12-07T17:24:50,862 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288905.4c981ac2f2269323a666f3595ec4f10b. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed. 2024-12-07T17:24:50,862 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288905.4c981ac2f2269323a666f3595ec4f10b.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:24:50,862 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:24:50,862 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4c981ac2f2269323a666f3595ec4f10b/cf/2edb830c42934e868b56a6eb4b4dbf7a.24d938a75155038367ebbbeae07f4dc1->hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/24d938a75155038367ebbbeae07f4dc1/cf/2edb830c42934e868b56a6eb4b4dbf7a-top, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4c981ac2f2269323a666f3595ec4f10b/cf/a03a2207c15842ee89c1756e755ec215.e0033acfb1eec404819d264c83c486c7->hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e0033acfb1eec404819d264c83c486c7/cf/a03a2207c15842ee89c1756e755ec215-top] hfiles 2024-12-07T17:24:50,862 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4c981ac2f2269323a666f3595ec4f10b/cf/2edb830c42934e868b56a6eb4b4dbf7a.24d938a75155038367ebbbeae07f4dc1 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:24:50,863 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4c981ac2f2269323a666f3595ec4f10b/cf/a03a2207c15842ee89c1756e755ec215.e0033acfb1eec404819d264c83c486c7 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:24:50,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742122_1298 (size=269) 2024-12-07T17:24:50,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742122_1298 (size=269) 2024-12-07T17:24:50,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742122_1298 (size=269) 2024-12-07T17:24:50,876 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288905.4c981ac2f2269323a666f3595ec4f10b. 
2024-12-07T17:24:50,876 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-12-07T17:24:50,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=123 2024-12-07T17:24:50,877 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region 4c981ac2f2269323a666f3595ec4f10b 2024-12-07T17:24:50,877 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=123, ppid=122, state=RUNNABLE; SnapshotRegionProcedure 4c981ac2f2269323a666f3595ec4f10b 2024-12-07T17:24:50,879 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-12-07T17:24:50,880 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; SnapshotRegionProcedure 4c981ac2f2269323a666f3595ec4f10b in 174 msec 2024-12-07T17:24:50,880 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T17:24:50,880 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T17:24:50,881 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T17:24:50,881 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:24:50,882 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:24:50,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742123_1299 (size=670) 2024-12-07T17:24:50,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742123_1299 (size=670) 2024-12-07T17:24:50,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742123_1299 (size=670) 2024-12-07T17:24:50,898 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ 
ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T17:24:50,907 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T17:24:50,907 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:24:50,909 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T17:24:50,909 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-07T17:24:50,910 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 216 msec 2024-12-07T17:24:50,940 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0005_000001 (auth:SIMPLE) from 127.0.0.1:50684 2024-12-07T17:24:50,958 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_3/usercache/jenkins/appcache/application_1733592173099_0005/container_1733592173099_0005_01_000001/launch_container.sh] 2024-12-07T17:24:50,958 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_3/usercache/jenkins/appcache/application_1733592173099_0005/container_1733592173099_0005_01_000001/container_tokens] 2024-12-07T17:24:50,958 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_3/usercache/jenkins/appcache/application_1733592173099_0005/container_1733592173099_0005_01_000001/sysfs] 2024-12-07T17:24:50,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-07T17:24:50,997 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 122 completed 2024-12-07T17:24:50,997 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592290997 2024-12-07T17:24:50,997 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:33765, tgtDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592290997, rawTgtDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592290997, srcFsUri=hdfs://localhost:33765, srcDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:24:51,027 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:33765, inputRoot=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:24:51,027 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1614647307_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592290997, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592290997/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:24:51,029 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 
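The surrounding lines show ExportSnapshot preparing its MapReduce job: it verifies the source snapshot, copies the snapshot manifest into the target directory, and then TableMapReduceUtil ships the required jars with the job. The same export can be driven programmatically through the tool's org.apache.hadoop.util.Tool interface; a minimal sketch, with the snapshot name and destination copied from the log (the exact option names are taken from recent HBase 2.x documentation and should be treated as an assumption):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Destination copied from the log; in practice this is usually the root
    // directory of a different HDFS/HBase cluster rather than an export-test path.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "--snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
        "--copy-to", "hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592290997",
        "--mappers", "1"
    });
    System.exit(rc);
  }
}

From a shell the equivalent invocation would be hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot --snapshot <name> --copy-to <hdfs-uri>.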
2024-12-07T17:24:51,035 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592290997/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:24:51,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742124_1300 (size=216) 2024-12-07T17:24:51,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742124_1300 (size=216) 2024-12-07T17:24:51,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742124_1300 (size=216) 2024-12-07T17:24:51,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742125_1301 (size=670) 2024-12-07T17:24:51,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742125_1301 (size=670) 2024-12-07T17:24:51,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742125_1301 (size=670) 2024-12-07T17:24:51,047 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:51,047 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:51,048 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:51,048 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:52,061 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T17:24:52,129 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop-6309258819685861765.jar 2024-12-07T17:24:52,129 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:52,130 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:52,200 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop-8842453612529283941.jar 2024-12-07T17:24:52,200 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:52,201 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:52,201 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:52,201 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:52,202 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:52,202 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-07T17:24:52,202 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-07T17:24:52,202 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-07T17:24:52,203 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-07T17:24:52,203 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-07T17:24:52,203 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-07T17:24:52,203 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-07T17:24:52,204 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-07T17:24:52,204 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-07T17:24:52,204 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-07T17:24:52,204 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-07T17:24:52,205 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-07T17:24:52,205 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-07T17:24:52,206 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:24:52,206 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:24:52,206 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T17:24:52,206 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:24:52,207 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:24:52,207 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T17:24:52,207 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T17:24:52,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742126_1302 (size=127628) 2024-12-07T17:24:52,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742126_1302 (size=127628) 2024-12-07T17:24:52,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742126_1302 (size=127628) 2024-12-07T17:24:52,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742127_1303 (size=2172101) 2024-12-07T17:24:52,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742127_1303 (size=2172101) 2024-12-07T17:24:52,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742127_1303 (size=2172101) 2024-12-07T17:24:52,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742128_1304 (size=213228) 2024-12-07T17:24:52,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742128_1304 (size=213228) 2024-12-07T17:24:52,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742128_1304 (size=213228) 2024-12-07T17:24:52,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742129_1305 (size=1877034) 2024-12-07T17:24:52,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742129_1305 (size=1877034) 2024-12-07T17:24:52,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742129_1305 (size=1877034) 2024-12-07T17:24:52,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742130_1306 (size=451756) 2024-12-07T17:24:52,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742130_1306 (size=451756) 2024-12-07T17:24:52,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to 
blk_1073742130_1306 (size=451756) 2024-12-07T17:24:52,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742131_1307 (size=533455) 2024-12-07T17:24:52,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742131_1307 (size=533455) 2024-12-07T17:24:52,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742131_1307 (size=533455) 2024-12-07T17:24:52,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742132_1308 (size=7280644) 2024-12-07T17:24:52,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742132_1308 (size=7280644) 2024-12-07T17:24:52,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742132_1308 (size=7280644) 2024-12-07T17:24:52,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742133_1309 (size=4188619) 2024-12-07T17:24:52,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742133_1309 (size=4188619) 2024-12-07T17:24:52,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742133_1309 (size=4188619) 2024-12-07T17:24:52,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742134_1310 (size=20406) 2024-12-07T17:24:52,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742134_1310 (size=20406) 2024-12-07T17:24:52,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742134_1310 (size=20406) 2024-12-07T17:24:52,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742135_1311 (size=75495) 2024-12-07T17:24:52,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742135_1311 (size=75495) 2024-12-07T17:24:52,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742135_1311 (size=75495) 2024-12-07T17:24:52,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742136_1312 (size=45609) 2024-12-07T17:24:52,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742136_1312 (size=45609) 2024-12-07T17:24:52,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742136_1312 (size=45609) 2024-12-07T17:24:52,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742137_1313 (size=110084) 2024-12-07T17:24:52,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added 
to blk_1073742137_1313 (size=110084) 2024-12-07T17:24:52,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742137_1313 (size=110084) 2024-12-07T17:24:52,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742138_1314 (size=1323991) 2024-12-07T17:24:52,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742138_1314 (size=1323991) 2024-12-07T17:24:52,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742138_1314 (size=1323991) 2024-12-07T17:24:52,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742139_1315 (size=23076) 2024-12-07T17:24:52,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742139_1315 (size=23076) 2024-12-07T17:24:52,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742139_1315 (size=23076) 2024-12-07T17:24:52,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742140_1316 (size=126803) 2024-12-07T17:24:52,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742140_1316 (size=126803) 2024-12-07T17:24:52,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742140_1316 (size=126803) 2024-12-07T17:24:52,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742141_1317 (size=322274) 2024-12-07T17:24:52,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742141_1317 (size=322274) 2024-12-07T17:24:52,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742141_1317 (size=322274) 2024-12-07T17:24:52,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742142_1318 (size=1832290) 2024-12-07T17:24:52,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742142_1318 (size=1832290) 2024-12-07T17:24:52,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742142_1318 (size=1832290) 2024-12-07T17:24:52,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742143_1319 (size=6350154) 2024-12-07T17:24:52,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742143_1319 (size=6350154) 2024-12-07T17:24:52,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742143_1319 (size=6350154) 2024-12-07T17:24:52,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:45837 is added to blk_1073742144_1320 (size=30081) 2024-12-07T17:24:52,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742144_1320 (size=30081) 2024-12-07T17:24:52,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742144_1320 (size=30081) 2024-12-07T17:24:52,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742145_1321 (size=53616) 2024-12-07T17:24:52,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742145_1321 (size=53616) 2024-12-07T17:24:52,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742145_1321 (size=53616) 2024-12-07T17:24:52,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742146_1322 (size=29229) 2024-12-07T17:24:52,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742146_1322 (size=29229) 2024-12-07T17:24:52,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742146_1322 (size=29229) 2024-12-07T17:24:52,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742147_1323 (size=169089) 2024-12-07T17:24:52,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742147_1323 (size=169089) 2024-12-07T17:24:52,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742147_1323 (size=169089) 2024-12-07T17:24:52,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742148_1324 (size=5175431) 2024-12-07T17:24:52,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742148_1324 (size=5175431) 2024-12-07T17:24:52,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742148_1324 (size=5175431) 2024-12-07T17:24:52,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742149_1325 (size=136454) 2024-12-07T17:24:52,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742149_1325 (size=136454) 2024-12-07T17:24:52,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742149_1325 (size=136454) 2024-12-07T17:24:52,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742150_1326 (size=907853) 2024-12-07T17:24:52,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742150_1326 (size=907853) 2024-12-07T17:24:52,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:33595 is added to blk_1073742150_1326 (size=907853) 2024-12-07T17:24:52,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742151_1327 (size=3317408) 2024-12-07T17:24:52,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742151_1327 (size=3317408) 2024-12-07T17:24:52,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742151_1327 (size=3317408) 2024-12-07T17:24:52,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742152_1328 (size=503880) 2024-12-07T17:24:52,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742152_1328 (size=503880) 2024-12-07T17:24:52,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742152_1328 (size=503880) 2024-12-07T17:24:52,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742153_1329 (size=4695811) 2024-12-07T17:24:52,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742153_1329 (size=4695811) 2024-12-07T17:24:52,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742153_1329 (size=4695811) 2024-12-07T17:24:52,721 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
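Note: the addStoredBlock records above show the NameNode registering every newly written block on all three DataNodes of the mini-cluster (127.0.0.1:33165, 127.0.0.1:33595, 127.0.0.1:45837), i.e. a replication factor of 3. A minimal client-side sketch for confirming that from the HDFS FileSystem API follows; the file path and class name are illustrative (not taken from this run), and it assumes the same NameNode address seen elsewhere in the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CheckReplication {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical file; substitute a real path under the export staging directory.
    Path file = new Path("hdfs://localhost:33765/user/jenkins/example-file");
    try (FileSystem fs = FileSystem.get(file.toUri(), conf)) {
      FileStatus st = fs.getFileStatus(file);
      System.out.println("replication factor: " + st.getReplication());
      // Each BlockLocation lists the DataNodes holding one block, which is what
      // the three addStoredBlock entries per block in the log correspond to.
      for (BlockLocation loc : fs.getFileBlockLocations(st, 0, st.getLen())) {
        System.out.println("offset " + loc.getOffset() + " length " + loc.getLength()
            + " hosts " + String.join(",", loc.getNames()));
      }
    }
  }
}
```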
2024-12-07T17:24:52,723 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-12-07T17:24:52,725 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=9.7 K 2024-12-07T17:24:52,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742154_1330 (size=378) 2024-12-07T17:24:52,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742154_1330 (size=378) 2024-12-07T17:24:52,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742154_1330 (size=378) 2024-12-07T17:24:52,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742155_1331 (size=15) 2024-12-07T17:24:52,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742155_1331 (size=15) 2024-12-07T17:24:52,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742155_1331 (size=15) 2024-12-07T17:24:52,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742156_1332 (size=304940) 2024-12-07T17:24:52,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742156_1332 (size=304940) 2024-12-07T17:24:52,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742156_1332 (size=304940) 2024-12-07T17:24:52,771 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-07T17:24:52,771 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-07T17:24:52,779 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0006_000001 (auth:SIMPLE) from 127.0.0.1:52244 2024-12-07T17:24:54,860 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T17:24:54,860 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-12-07T17:24:54,861 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:24:54,861 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-12-07T17:24:54,862 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-07T17:24:59,200 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0006_000001 (auth:SIMPLE) from 127.0.0.1:57078 2024-12-07T17:24:59,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742157_1333 (size=350614) 2024-12-07T17:24:59,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742157_1333 (size=350614) 2024-12-07T17:24:59,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742157_1333 (size=350614) 2024-12-07T17:25:00,364 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T17:25:01,607 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0006_000001 (auth:SIMPLE) from 127.0.0.1:52256 2024-12-07T17:25:05,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742158_1334 (size=4945) 2024-12-07T17:25:05,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742158_1334 (size=4945) 2024-12-07T17:25:05,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742158_1334 (size=4945) 2024-12-07T17:25:05,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742159_1335 (size=4945) 2024-12-07T17:25:05,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742159_1335 (size=4945) 2024-12-07T17:25:05,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:33595 is added to blk_1073742159_1335 (size=4945) 2024-12-07T17:25:06,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742160_1336 (size=17474) 2024-12-07T17:25:06,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742160_1336 (size=17474) 2024-12-07T17:25:06,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742160_1336 (size=17474) 2024-12-07T17:25:06,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742161_1337 (size=482) 2024-12-07T17:25:06,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742161_1337 (size=482) 2024-12-07T17:25:06,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742161_1337 (size=482) 2024-12-07T17:25:06,086 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-1_0/usercache/jenkins/appcache/application_1733592173099_0006/container_1733592173099_0006_01_000002/launch_container.sh] 2024-12-07T17:25:06,086 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-1_0/usercache/jenkins/appcache/application_1733592173099_0006/container_1733592173099_0006_01_000002/container_tokens] 2024-12-07T17:25:06,086 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-1_0/usercache/jenkins/appcache/application_1733592173099_0006/container_1733592173099_0006_01_000002/sysfs] 2024-12-07T17:25:06,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742162_1338 (size=17474) 2024-12-07T17:25:06,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742162_1338 (size=17474) 2024-12-07T17:25:06,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742162_1338 (size=17474) 2024-12-07T17:25:06,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742163_1339 (size=350614) 2024-12-07T17:25:06,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742163_1339 (size=350614) 2024-12-07T17:25:06,185 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742163_1339 (size=350614) 2024-12-07T17:25:06,200 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0006_000001 (auth:SIMPLE) from 127.0.0.1:44776 2024-12-07T17:25:08,154 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-07T17:25:08,155 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-07T17:25:08,163 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:25:08,163 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-07T17:25:08,164 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-07T17:25:08,164 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1614647307_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:25:08,164 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-07T17:25:08,164 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-07T17:25:08,164 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1614647307_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592290997/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592290997/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:25:08,165 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592290997/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-07T17:25:08,165 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592290997/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-07T17:25:08,174 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:25:08,174 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:25:08,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] 
procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:25:08,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-07T17:25:08,177 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592308177"}]},"ts":"1733592308177"} 2024-12-07T17:25:08,179 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta 2024-12-07T17:25:08,181 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-12-07T17:25:08,181 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-12-07T17:25:08,183 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4c981ac2f2269323a666f3595ec4f10b, UNASSIGN}] 2024-12-07T17:25:08,183 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4c981ac2f2269323a666f3595ec4f10b, UNASSIGN 2024-12-07T17:25:08,184 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=4c981ac2f2269323a666f3595ec4f10b, regionState=CLOSING, regionLocation=5c70517e8267,41313,1733592165267 2024-12-07T17:25:08,185 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T17:25:08,185 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; CloseRegionProcedure 4c981ac2f2269323a666f3595ec4f10b, server=5c70517e8267,41313,1733592165267}] 2024-12-07T17:25:08,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-07T17:25:08,336 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5c70517e8267,41313,1733592165267 2024-12-07T17:25:08,337 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(124): Close 4c981ac2f2269323a666f3595ec4f10b 2024-12-07T17:25:08,337 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T17:25:08,337 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1681): Closing 4c981ac2f2269323a666f3595ec4f10b, disabling compactions & flushes 2024-12-07T17:25:08,337 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1703): Closing region 
testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288905.4c981ac2f2269323a666f3595ec4f10b. 2024-12-07T17:25:08,337 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288905.4c981ac2f2269323a666f3595ec4f10b. 2024-12-07T17:25:08,337 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288905.4c981ac2f2269323a666f3595ec4f10b. after waiting 0 ms 2024-12-07T17:25:08,337 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288905.4c981ac2f2269323a666f3595ec4f10b. 2024-12-07T17:25:08,344 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4c981ac2f2269323a666f3595ec4f10b/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-12-07T17:25:08,352 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:25:08,353 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288905.4c981ac2f2269323a666f3595ec4f10b. 
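Note: the records around this point show the master-side DisableTableProcedure (pid=124) closing the single region of testtb-testExportFileSystemStateWithMergeRegion-1, followed shortly after by a DeleteTableProcedure (pid=128) archiving its files and removing it from hbase:meta. A minimal sketch of the client calls that trigger this disable-then-delete sequence, assuming an HBase 2.x client and a reachable cluster (class name and setup are illustrative):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // disableTable blocks until the DisableTableProcedure and its region-close
      // subprocedures (as logged above) have completed.
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);
      }
      // deleteTable archives the region directories and removes the table from
      // hbase:meta, mirroring the DeleteTableProcedure records that follow.
      admin.deleteTable(table);
    }
  }
}
```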
2024-12-07T17:25:08,353 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1635): Region close journal for 4c981ac2f2269323a666f3595ec4f10b: 2024-12-07T17:25:08,355 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(170): Closed 4c981ac2f2269323a666f3595ec4f10b 2024-12-07T17:25:08,355 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=4c981ac2f2269323a666f3595ec4f10b, regionState=CLOSED 2024-12-07T17:25:08,360 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-12-07T17:25:08,360 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; CloseRegionProcedure 4c981ac2f2269323a666f3595ec4f10b, server=5c70517e8267,41313,1733592165267 in 173 msec 2024-12-07T17:25:08,364 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=126, resume processing ppid=125 2024-12-07T17:25:08,364 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, ppid=125, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4c981ac2f2269323a666f3595ec4f10b, UNASSIGN in 178 msec 2024-12-07T17:25:08,366 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-12-07T17:25:08,366 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 183 msec 2024-12-07T17:25:08,367 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592308367"}]},"ts":"1733592308367"} 2024-12-07T17:25:08,370 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in hbase:meta 2024-12-07T17:25:08,372 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-12-07T17:25:08,374 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 199 msec 2024-12-07T17:25:08,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-07T17:25:08,479 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 124 completed 2024-12-07T17:25:08,480 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:25:08,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:25:08,482 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=128, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure 
table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:25:08,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:25:08,483 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=128, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:25:08,485 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36163 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:25:08,486 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4c981ac2f2269323a666f3595ec4f10b 2024-12-07T17:25:08,486 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/24d938a75155038367ebbbeae07f4dc1 2024-12-07T17:25:08,487 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e0033acfb1eec404819d264c83c486c7 2024-12-07T17:25:08,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:25:08,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:25:08,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:25:08,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:25:08,488 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-07T17:25:08,489 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4c981ac2f2269323a666f3595ec4f10b/cf, FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4c981ac2f2269323a666f3595ec4f10b/recovered.edits] 2024-12-07T17:25:08,489 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-07T17:25:08,489 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-07T17:25:08,489 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-07T17:25:08,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:25:08,490 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/24d938a75155038367ebbbeae07f4dc1/cf, FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/24d938a75155038367ebbbeae07f4dc1/recovered.edits] 2024-12-07T17:25:08,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:25:08,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:25:08,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:25:08,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:25:08,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:25:08,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:25:08,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:25:08,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-07T17:25:08,492 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:08,492 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:08,492 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:08,492 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:08,493 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e0033acfb1eec404819d264c83c486c7/cf, FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e0033acfb1eec404819d264c83c486c7/recovered.edits] 2024-12-07T17:25:08,494 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4c981ac2f2269323a666f3595ec4f10b/cf/2edb830c42934e868b56a6eb4b4dbf7a.24d938a75155038367ebbbeae07f4dc1 to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4c981ac2f2269323a666f3595ec4f10b/cf/2edb830c42934e868b56a6eb4b4dbf7a.24d938a75155038367ebbbeae07f4dc1 2024-12-07T17:25:08,495 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4c981ac2f2269323a666f3595ec4f10b/cf/a03a2207c15842ee89c1756e755ec215.e0033acfb1eec404819d264c83c486c7 to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4c981ac2f2269323a666f3595ec4f10b/cf/a03a2207c15842ee89c1756e755ec215.e0033acfb1eec404819d264c83c486c7 2024-12-07T17:25:08,496 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/24d938a75155038367ebbbeae07f4dc1/cf/2edb830c42934e868b56a6eb4b4dbf7a to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/24d938a75155038367ebbbeae07f4dc1/cf/2edb830c42934e868b56a6eb4b4dbf7a 2024-12-07T17:25:08,499 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e0033acfb1eec404819d264c83c486c7/cf/a03a2207c15842ee89c1756e755ec215 to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e0033acfb1eec404819d264c83c486c7/cf/a03a2207c15842ee89c1756e755ec215 2024-12-07T17:25:08,499 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4c981ac2f2269323a666f3595ec4f10b/recovered.edits/12.seqid to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4c981ac2f2269323a666f3595ec4f10b/recovered.edits/12.seqid 2024-12-07T17:25:08,500 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4c981ac2f2269323a666f3595ec4f10b 2024-12-07T17:25:08,506 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e0033acfb1eec404819d264c83c486c7/recovered.edits/8.seqid to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e0033acfb1eec404819d264c83c486c7/recovered.edits/8.seqid 2024-12-07T17:25:08,507 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e0033acfb1eec404819d264c83c486c7 2024-12-07T17:25:08,508 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/24d938a75155038367ebbbeae07f4dc1/recovered.edits/8.seqid to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/24d938a75155038367ebbbeae07f4dc1/recovered.edits/8.seqid 2024-12-07T17:25:08,508 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/24d938a75155038367ebbbeae07f4dc1 2024-12-07T17:25:08,508 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-12-07T17:25:08,511 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=128, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:25:08,515 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 2024-12-07T17:25:08,518 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' 
descriptor. 2024-12-07T17:25:08,519 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=128, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:25:08,519 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 2024-12-07T17:25:08,520 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288905.4c981ac2f2269323a666f3595ec4f10b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733592308519"}]},"ts":"9223372036854775807"} 2024-12-07T17:25:08,525 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-07T17:25:08,525 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 4c981ac2f2269323a666f3595ec4f10b, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288905.4c981ac2f2269323a666f3595ec4f10b.', STARTKEY => '', ENDKEY => ''}] 2024-12-07T17:25:08,525 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 2024-12-07T17:25:08,526 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733592308525"}]},"ts":"9223372036854775807"} 2024-12-07T17:25:08,531 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-12-07T17:25:08,535 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=128, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:25:08,536 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 55 msec 2024-12-07T17:25:08,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-07T17:25:08,592 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 128 completed 2024-12-07T17:25:08,593 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T17:25:08,593 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T17:25:08,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=129, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T17:25:08,597 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592308597"}]},"ts":"1733592308597"} 2024-12-07T17:25:08,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=129 2024-12-07T17:25:08,599 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-12-07T17:25:08,608 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 2024-12-07T17:25:08,609 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=130, ppid=129, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-12-07T17:25:08,610 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=7e007c78d4bbd37599fc2cd27d69bfde, UNASSIGN}, {pid=132, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=4c8dcef6eef6c6ffe6eec4eeefe6c98b, UNASSIGN}] 2024-12-07T17:25:08,611 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=132, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=4c8dcef6eef6c6ffe6eec4eeefe6c98b, UNASSIGN 2024-12-07T17:25:08,611 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=131, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=7e007c78d4bbd37599fc2cd27d69bfde, UNASSIGN 2024-12-07T17:25:08,612 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=4c8dcef6eef6c6ffe6eec4eeefe6c98b, regionState=CLOSING, regionLocation=5c70517e8267,38423,1733592165361 2024-12-07T17:25:08,612 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=131 updating hbase:meta row=7e007c78d4bbd37599fc2cd27d69bfde, regionState=CLOSING, regionLocation=5c70517e8267,36163,1733592165482 2024-12-07T17:25:08,613 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T17:25:08,613 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; CloseRegionProcedure 4c8dcef6eef6c6ffe6eec4eeefe6c98b, server=5c70517e8267,38423,1733592165361}] 2024-12-07T17:25:08,614 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T17:25:08,614 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=134, ppid=131, state=RUNNABLE; CloseRegionProcedure 7e007c78d4bbd37599fc2cd27d69bfde, server=5c70517e8267,36163,1733592165482}] 2024-12-07T17:25:08,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-12-07T17:25:08,765 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5c70517e8267,38423,1733592165361 2024-12-07T17:25:08,766 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(124): Close 4c8dcef6eef6c6ffe6eec4eeefe6c98b 2024-12-07T17:25:08,766 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New 
admin connection to 5c70517e8267,36163,1733592165482 2024-12-07T17:25:08,766 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T17:25:08,766 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1681): Closing 4c8dcef6eef6c6ffe6eec4eeefe6c98b, disabling compactions & flushes 2024-12-07T17:25:08,766 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1733592286851.4c8dcef6eef6c6ffe6eec4eeefe6c98b. 2024-12-07T17:25:08,766 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733592286851.4c8dcef6eef6c6ffe6eec4eeefe6c98b. 2024-12-07T17:25:08,766 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733592286851.4c8dcef6eef6c6ffe6eec4eeefe6c98b. after waiting 0 ms 2024-12-07T17:25:08,766 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1733592286851.4c8dcef6eef6c6ffe6eec4eeefe6c98b. 2024-12-07T17:25:08,767 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] handler.UnassignRegionHandler(124): Close 7e007c78d4bbd37599fc2cd27d69bfde 2024-12-07T17:25:08,767 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T17:25:08,767 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1681): Closing 7e007c78d4bbd37599fc2cd27d69bfde, disabling compactions & flushes 2024-12-07T17:25:08,767 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1733592286851.7e007c78d4bbd37599fc2cd27d69bfde. 2024-12-07T17:25:08,767 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733592286851.7e007c78d4bbd37599fc2cd27d69bfde. 2024-12-07T17:25:08,767 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733592286851.7e007c78d4bbd37599fc2cd27d69bfde. after waiting 0 ms 2024-12-07T17:25:08,768 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1733592286851.7e007c78d4bbd37599fc2cd27d69bfde. 
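Note: at this point the source table testtb-testExportFileSystemStateWithMergeRegion still has two regions (encoded names 7e007c78d4bbd37599fc2cd27d69bfde and 4c8dcef6eef6c6ffe6eec4eeefe6c98b), which the CloseTableRegionsProcedure unassigns in parallel. A sketch of enumerating a table's regions from a client, assuming the HBase 2.x Admin API (class name illustrative):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

public class ListTableRegions {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // One RegionInfo per region; for this table the log shows two regions
      // being closed, one per TransitRegionStateProcedure (pid=131 and pid=132).
      for (RegionInfo region : admin.getRegions(table)) {
        System.out.println(region.getRegionNameAsString()
            + " encoded=" + region.getEncodedName());
      }
    }
  }
}
```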
2024-12-07T17:25:08,779 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion/4c8dcef6eef6c6ffe6eec4eeefe6c98b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-07T17:25:08,780 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-07T17:25:08,781 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1733592286851.4c8dcef6eef6c6ffe6eec4eeefe6c98b.
2024-12-07T17:25:08,781 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1635): Region close journal for 4c8dcef6eef6c6ffe6eec4eeefe6c98b:
2024-12-07T17:25:08,781 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion/7e007c78d4bbd37599fc2cd27d69bfde/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-07T17:25:08,782 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-07T17:25:08,782 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,,1733592286851.7e007c78d4bbd37599fc2cd27d69bfde.
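Note: earlier in this log the test exported snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' via the ExportSnapshot tool ('Loading Snapshot ... hfile list', 'Finalize the Snapshot Export', 'Export Completed') before the cleanup recorded here. A sketch of driving that tool programmatically follows; it assumes ExportSnapshot can be run through Hadoop's ToolRunner with the documented -snapshot/-copy-to options, and the destination URI is illustrative.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class RunExportSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Destination is hypothetical; the test exported to an export-test directory
    // under the same HDFS instance.
    String[] exportArgs = {
        "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
        "-copy-to", "hdfs://localhost:33765/user/jenkins/example-export",
        "-mappers", "1"
    };
    // The export runs as a MapReduce job that copies the snapshot's hfiles and
    // manifest (.snapshotinfo, data.manifest) to the target filesystem.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), exportArgs);
    System.exit(rc);
  }
}
```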
2024-12-07T17:25:08,782 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1635): Region close journal for 7e007c78d4bbd37599fc2cd27d69bfde: 2024-12-07T17:25:08,783 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(170): Closed 4c8dcef6eef6c6ffe6eec4eeefe6c98b 2024-12-07T17:25:08,783 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=4c8dcef6eef6c6ffe6eec4eeefe6c98b, regionState=CLOSED 2024-12-07T17:25:08,784 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] handler.UnassignRegionHandler(170): Closed 7e007c78d4bbd37599fc2cd27d69bfde 2024-12-07T17:25:08,786 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=131 updating hbase:meta row=7e007c78d4bbd37599fc2cd27d69bfde, regionState=CLOSED 2024-12-07T17:25:08,790 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-12-07T17:25:08,790 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=134, resume processing ppid=131 2024-12-07T17:25:08,791 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, ppid=131, state=SUCCESS; CloseRegionProcedure 7e007c78d4bbd37599fc2cd27d69bfde, server=5c70517e8267,36163,1733592165482 in 175 msec 2024-12-07T17:25:08,791 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; CloseRegionProcedure 4c8dcef6eef6c6ffe6eec4eeefe6c98b, server=5c70517e8267,38423,1733592165361 in 175 msec 2024-12-07T17:25:08,791 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, ppid=130, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=4c8dcef6eef6c6ffe6eec4eeefe6c98b, UNASSIGN in 180 msec 2024-12-07T17:25:08,792 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-12-07T17:25:08,792 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=7e007c78d4bbd37599fc2cd27d69bfde, UNASSIGN in 181 msec 2024-12-07T17:25:08,794 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=130, resume processing ppid=129 2024-12-07T17:25:08,794 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, ppid=129, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 184 msec 2024-12-07T17:25:08,795 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592308795"}]},"ts":"1733592308795"} 2024-12-07T17:25:08,798 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 2024-12-07T17:25:08,802 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-12-07T17:25:08,803 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 209 msec 2024-12-07T17:25:08,901 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-12-07T17:25:08,901 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 129 completed 2024-12-07T17:25:08,902 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T17:25:08,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=135, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T17:25:08,904 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=135, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T17:25:08,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T17:25:08,905 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=135, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T17:25:08,906 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36163 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T17:25:08,908 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion/7e007c78d4bbd37599fc2cd27d69bfde 2024-12-07T17:25:08,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T17:25:08,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T17:25:08,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T17:25:08,911 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T17:25:08,912 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-07T17:25:08,913 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-07T17:25:08,913 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-07T17:25:08,913 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-07T17:25:08,914 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion/7e007c78d4bbd37599fc2cd27d69bfde/cf, FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion/7e007c78d4bbd37599fc2cd27d69bfde/recovered.edits] 2024-12-07T17:25:08,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T17:25:08,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:25:08,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T17:25:08,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:25:08,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T17:25:08,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:25:08,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T17:25:08,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:25:08,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-07T17:25:08,920 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion/7e007c78d4bbd37599fc2cd27d69bfde/cf/910ffb65130c4d08bbfd48eb9c39924a to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/7e007c78d4bbd37599fc2cd27d69bfde/cf/910ffb65130c4d08bbfd48eb9c39924a 2024-12-07T17:25:08,925 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion/4c8dcef6eef6c6ffe6eec4eeefe6c98b 2024-12-07T17:25:08,928 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion/4c8dcef6eef6c6ffe6eec4eeefe6c98b/cf, FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion/4c8dcef6eef6c6ffe6eec4eeefe6c98b/recovered.edits] 2024-12-07T17:25:08,931 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion/7e007c78d4bbd37599fc2cd27d69bfde/recovered.edits/9.seqid to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/7e007c78d4bbd37599fc2cd27d69bfde/recovered.edits/9.seqid 2024-12-07T17:25:08,932 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion/7e007c78d4bbd37599fc2cd27d69bfde 2024-12-07T17:25:08,934 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion/4c8dcef6eef6c6ffe6eec4eeefe6c98b/cf/f51a36416d2a4b33ae5dc9b6dd78dd59 to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/4c8dcef6eef6c6ffe6eec4eeefe6c98b/cf/f51a36416d2a4b33ae5dc9b6dd78dd59 2024-12-07T17:25:08,937 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion/4c8dcef6eef6c6ffe6eec4eeefe6c98b/recovered.edits/9.seqid to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/4c8dcef6eef6c6ffe6eec4eeefe6c98b/recovered.edits/9.seqid 2024-12-07T17:25:08,938 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithMergeRegion/4c8dcef6eef6c6ffe6eec4eeefe6c98b 2024-12-07T17:25:08,938 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-12-07T17:25:08,941 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting 
regions from META for pid=135, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T17:25:08,946 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 2024-12-07T17:25:08,949 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 2024-12-07T17:25:08,950 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=135, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T17:25:08,950 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 2024-12-07T17:25:08,951 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1733592286851.7e007c78d4bbd37599fc2cd27d69bfde.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733592308950"}]},"ts":"9223372036854775807"} 2024-12-07T17:25:08,951 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1733592286851.4c8dcef6eef6c6ffe6eec4eeefe6c98b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733592308950"}]},"ts":"9223372036854775807"} 2024-12-07T17:25:08,953 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-07T17:25:08,953 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 7e007c78d4bbd37599fc2cd27d69bfde, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733592286851.7e007c78d4bbd37599fc2cd27d69bfde.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 4c8dcef6eef6c6ffe6eec4eeefe6c98b, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733592286851.4c8dcef6eef6c6ffe6eec4eeefe6c98b.', STARTKEY => '1', ENDKEY => ''}] 2024-12-07T17:25:08,953 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 
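The DeleteTableProcedure above (pid=135) archives the region directories, removes the region rows from hbase:meta, and drops the table descriptor; the snapshot deletions that follow are separate Admin calls. A minimal client-side sketch of the same sequence, assuming the standard HBase Admin API (table and snapshot names are copied from the log):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableAndSnapshots {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // The table must already be disabled (DisableTableProcedure pid=129 above).
          admin.deleteTable(table); // drives DeleteTableProcedure: archive HFiles, clean hbase:meta
          // Snapshot cleanup, matching the SnapshotManager "Deleting snapshot" entries that follow.
          admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion");
          admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithMergeRegion");
          admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1");
        }
      }
    }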
2024-12-07T17:25:08,953 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733592308953"}]},"ts":"9223372036854775807"} 2024-12-07T17:25:08,956 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-12-07T17:25:08,958 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=135, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T17:25:08,959 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 56 msec 2024-12-07T17:25:09,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-07T17:25:09,018 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 135 completed 2024-12-07T17:25:09,028 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" 2024-12-07T17:25:09,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-07T17:25:09,033 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" 2024-12-07T17:25:09,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-07T17:25:09,036 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" 2024-12-07T17:25:09,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:25:09,074 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=794 (was 780) Potentially hanging thread: hconnection-0xeb8403e-shared-pool-34 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xeb8403e-shared-pool-32 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 32758) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-4851 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) 
java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1614647307_22 at /127.0.0.1:35010 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1614647307_22 at /127.0.0.1:37862 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-12 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xeb8403e-shared-pool-33 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42193 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: hconnection-0xeb8403e-shared-pool-29 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_684994072_1 at /127.0.0.1:34984 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/5c70517e8267:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xeb8403e-shared-pool-30 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1614647307_22 at /127.0.0.1:52566 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1173471231) connection to localhost/127.0.0.1:42193 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: hconnection-0xeb8403e-shared-pool-31 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/5c70517e8267:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_684994072_1 at /127.0.0.1:52534 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=793 (was 789) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=565 (was 577), ProcessCount=20 (was 20), AvailableMemoryMB=4231 (was 4531) 2024-12-07T17:25:09,075 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=794 is superior to 500 2024-12-07T17:25:09,094 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=794, OpenFileDescriptor=793, MaxFileDescriptor=1048576, SystemLoadAverage=565, ProcessCount=20, AvailableMemoryMB=4229 2024-12-07T17:25:09,094 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=794 is superior to 500 2024-12-07T17:25:09,096 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T17:25:09,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-07T17:25:09,100 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T17:25:09,100 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:25:09,100 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportExpiredSnapshot" procId is: 136 2024-12-07T17:25:09,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-07T17:25:09,102 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T17:25:09,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742164_1340 (size=407) 2024-12-07T17:25:09,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742164_1340 (size=407) 2024-12-07T17:25:09,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742164_1340 (size=407) 2024-12-07T17:25:09,127 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 0896c53167410c62154cbc0187feec1d, NAME => 'testtb-testExportExpiredSnapshot,1,1733592309095.0896c53167410c62154cbc0187feec1d.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 
'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:25:09,127 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 3fd7e64b6fc687219db67183a42fd845, NAME => 'testtb-testExportExpiredSnapshot,,1733592309095.3fd7e64b6fc687219db67183a42fd845.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:25:09,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742166_1342 (size=68) 2024-12-07T17:25:09,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742166_1342 (size=68) 2024-12-07T17:25:09,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742166_1342 (size=68) 2024-12-07T17:25:09,153 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,,1733592309095.3fd7e64b6fc687219db67183a42fd845.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:25:09,153 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1681): Closing 3fd7e64b6fc687219db67183a42fd845, disabling compactions & flushes 2024-12-07T17:25:09,154 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,,1733592309095.3fd7e64b6fc687219db67183a42fd845. 2024-12-07T17:25:09,154 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,,1733592309095.3fd7e64b6fc687219db67183a42fd845. 2024-12-07T17:25:09,154 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,,1733592309095.3fd7e64b6fc687219db67183a42fd845. after waiting 0 ms 2024-12-07T17:25:09,154 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,,1733592309095.3fd7e64b6fc687219db67183a42fd845. 2024-12-07T17:25:09,154 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,,1733592309095.3fd7e64b6fc687219db67183a42fd845. 
2024-12-07T17:25:09,154 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1635): Region close journal for 3fd7e64b6fc687219db67183a42fd845: 2024-12-07T17:25:09,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742165_1341 (size=68) 2024-12-07T17:25:09,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742165_1341 (size=68) 2024-12-07T17:25:09,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742165_1341 (size=68) 2024-12-07T17:25:09,173 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,1,1733592309095.0896c53167410c62154cbc0187feec1d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:25:09,173 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1681): Closing 0896c53167410c62154cbc0187feec1d, disabling compactions & flushes 2024-12-07T17:25:09,173 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,1,1733592309095.0896c53167410c62154cbc0187feec1d. 2024-12-07T17:25:09,173 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,1,1733592309095.0896c53167410c62154cbc0187feec1d. 2024-12-07T17:25:09,173 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,1,1733592309095.0896c53167410c62154cbc0187feec1d. after waiting 0 ms 2024-12-07T17:25:09,173 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,1,1733592309095.0896c53167410c62154cbc0187feec1d. 2024-12-07T17:25:09,173 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,1,1733592309095.0896c53167410c62154cbc0187feec1d. 
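The CreateTableProcedure above (pid=136) writes the filesystem layout for two regions split at row key '1', with a single column family 'cf' (VERSIONS=1, BLOCKSIZE=65536). A rough client-side equivalent, assuming the HBase 2.x descriptor builders; attributes not set here are left at their defaults:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableExample {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // One column family 'cf' with the attributes shown in the create request above.
          ColumnFamilyDescriptorBuilder cf =
              ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)
                  .setBlocksize(65536);
          TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(table)
              .setRegionReplication(1)
              .setColumnFamily(cf.build());
          // A single split key of '1' yields the two regions seen in the log:
          // ('' .. '1') and ('1' .. '').
          byte[][] splitKeys = { Bytes.toBytes("1") };
          admin.createTable(td.build(), splitKeys);
        }
      }
    }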
2024-12-07T17:25:09,174 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1635): Region close journal for 0896c53167410c62154cbc0187feec1d: 2024-12-07T17:25:09,175 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T17:25:09,176 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1733592309095.3fd7e64b6fc687219db67183a42fd845.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733592309175"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733592309175"}]},"ts":"1733592309175"} 2024-12-07T17:25:09,176 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1733592309095.0896c53167410c62154cbc0187feec1d.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733592309175"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733592309175"}]},"ts":"1733592309175"} 2024-12-07T17:25:09,179 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-07T17:25:09,180 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T17:25:09,180 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592309180"}]},"ts":"1733592309180"} 2024-12-07T17:25:09,182 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-07T17:25:09,186 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(202): Hosts are {5c70517e8267=0} racks are {/default-rack=0} 2024-12-07T17:25:09,188 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-07T17:25:09,188 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-07T17:25:09,188 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-07T17:25:09,188 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-07T17:25:09,188 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-07T17:25:09,188 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-07T17:25:09,188 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T17:25:09,188 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=3fd7e64b6fc687219db67183a42fd845, ASSIGN}, {pid=138, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=0896c53167410c62154cbc0187feec1d, ASSIGN}] 2024-12-07T17:25:09,189 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=137, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure 
table=testtb-testExportExpiredSnapshot, region=3fd7e64b6fc687219db67183a42fd845, ASSIGN 2024-12-07T17:25:09,191 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=137, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=3fd7e64b6fc687219db67183a42fd845, ASSIGN; state=OFFLINE, location=5c70517e8267,41313,1733592165267; forceNewPlan=false, retain=false 2024-12-07T17:25:09,192 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=138, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=0896c53167410c62154cbc0187feec1d, ASSIGN 2024-12-07T17:25:09,193 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=138, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=0896c53167410c62154cbc0187feec1d, ASSIGN; state=OFFLINE, location=5c70517e8267,36163,1733592165482; forceNewPlan=false, retain=false 2024-12-07T17:25:09,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-07T17:25:09,341 INFO [5c70517e8267:36895 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-07T17:25:09,342 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=137 updating hbase:meta row=3fd7e64b6fc687219db67183a42fd845, regionState=OPENING, regionLocation=5c70517e8267,41313,1733592165267 2024-12-07T17:25:09,342 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=138 updating hbase:meta row=0896c53167410c62154cbc0187feec1d, regionState=OPENING, regionLocation=5c70517e8267,36163,1733592165482 2024-12-07T17:25:09,344 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=137, state=RUNNABLE; OpenRegionProcedure 3fd7e64b6fc687219db67183a42fd845, server=5c70517e8267,41313,1733592165267}] 2024-12-07T17:25:09,345 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=140, ppid=138, state=RUNNABLE; OpenRegionProcedure 0896c53167410c62154cbc0187feec1d, server=5c70517e8267,36163,1733592165482}] 2024-12-07T17:25:09,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-07T17:25:09,496 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5c70517e8267,41313,1733592165267 2024-12-07T17:25:09,497 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5c70517e8267,36163,1733592165482 2024-12-07T17:25:09,500 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] handler.AssignRegionHandler(135): Open testtb-testExportExpiredSnapshot,,1733592309095.3fd7e64b6fc687219db67183a42fd845. 
2024-12-07T17:25:09,500 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7285): Opening region: {ENCODED => 3fd7e64b6fc687219db67183a42fd845, NAME => 'testtb-testExportExpiredSnapshot,,1733592309095.3fd7e64b6fc687219db67183a42fd845.', STARTKEY => '', ENDKEY => '1'} 2024-12-07T17:25:09,500 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1733592309095.3fd7e64b6fc687219db67183a42fd845. service=AccessControlService 2024-12-07T17:25:09,501 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T17:25:09,501 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 3fd7e64b6fc687219db67183a42fd845 2024-12-07T17:25:09,501 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,,1733592309095.3fd7e64b6fc687219db67183a42fd845.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:25:09,501 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7327): checking encryption for 3fd7e64b6fc687219db67183a42fd845 2024-12-07T17:25:09,501 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7330): checking classloading for 3fd7e64b6fc687219db67183a42fd845 2024-12-07T17:25:09,502 INFO [StoreOpener-3fd7e64b6fc687219db67183a42fd845-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3fd7e64b6fc687219db67183a42fd845 2024-12-07T17:25:09,504 INFO [StoreOpener-3fd7e64b6fc687219db67183a42fd845-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3fd7e64b6fc687219db67183a42fd845 columnFamilyName cf 2024-12-07T17:25:09,504 DEBUG [StoreOpener-3fd7e64b6fc687219db67183a42fd845-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:25:09,504 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] handler.AssignRegionHandler(135): Open testtb-testExportExpiredSnapshot,1,1733592309095.0896c53167410c62154cbc0187feec1d. 
2024-12-07T17:25:09,504 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7285): Opening region: {ENCODED => 0896c53167410c62154cbc0187feec1d, NAME => 'testtb-testExportExpiredSnapshot,1,1733592309095.0896c53167410c62154cbc0187feec1d.', STARTKEY => '1', ENDKEY => ''} 2024-12-07T17:25:09,504 INFO [StoreOpener-3fd7e64b6fc687219db67183a42fd845-1 {}] regionserver.HStore(327): Store=3fd7e64b6fc687219db67183a42fd845/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:25:09,504 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1733592309095.0896c53167410c62154cbc0187feec1d. service=AccessControlService 2024-12-07T17:25:09,504 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T17:25:09,505 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 0896c53167410c62154cbc0187feec1d 2024-12-07T17:25:09,505 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,1,1733592309095.0896c53167410c62154cbc0187feec1d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:25:09,505 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7327): checking encryption for 0896c53167410c62154cbc0187feec1d 2024-12-07T17:25:09,505 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7330): checking classloading for 0896c53167410c62154cbc0187feec1d 2024-12-07T17:25:09,505 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportExpiredSnapshot/3fd7e64b6fc687219db67183a42fd845 2024-12-07T17:25:09,505 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportExpiredSnapshot/3fd7e64b6fc687219db67183a42fd845 2024-12-07T17:25:09,507 INFO [StoreOpener-0896c53167410c62154cbc0187feec1d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0896c53167410c62154cbc0187feec1d 2024-12-07T17:25:09,508 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1085): writing seq id for 3fd7e64b6fc687219db67183a42fd845 2024-12-07T17:25:09,510 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 
{event_type=M_RS_OPEN_REGION, pid=139}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportExpiredSnapshot/3fd7e64b6fc687219db67183a42fd845/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T17:25:09,510 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1102): Opened 3fd7e64b6fc687219db67183a42fd845; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65141457, jitterRate=-0.029316648840904236}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T17:25:09,511 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1001): Region open journal for 3fd7e64b6fc687219db67183a42fd845: 2024-12-07T17:25:09,512 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1733592309095.3fd7e64b6fc687219db67183a42fd845., pid=139, masterSystemTime=1733592309496 2024-12-07T17:25:09,513 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1733592309095.3fd7e64b6fc687219db67183a42fd845. 2024-12-07T17:25:09,513 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] handler.AssignRegionHandler(164): Opened testtb-testExportExpiredSnapshot,,1733592309095.3fd7e64b6fc687219db67183a42fd845. 2024-12-07T17:25:09,514 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=137 updating hbase:meta row=3fd7e64b6fc687219db67183a42fd845, regionState=OPEN, openSeqNum=2, regionLocation=5c70517e8267,41313,1733592165267 2024-12-07T17:25:09,516 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=137 2024-12-07T17:25:09,517 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=137, state=SUCCESS; OpenRegionProcedure 3fd7e64b6fc687219db67183a42fd845, server=5c70517e8267,41313,1733592165267 in 171 msec 2024-12-07T17:25:09,517 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=3fd7e64b6fc687219db67183a42fd845, ASSIGN in 329 msec 2024-12-07T17:25:09,520 INFO [StoreOpener-0896c53167410c62154cbc0187feec1d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0896c53167410c62154cbc0187feec1d columnFamilyName cf 2024-12-07T17:25:09,520 DEBUG [StoreOpener-0896c53167410c62154cbc0187feec1d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:25:09,521 INFO [StoreOpener-0896c53167410c62154cbc0187feec1d-1 {}] regionserver.HStore(327): Store=0896c53167410c62154cbc0187feec1d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:25:09,522 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportExpiredSnapshot/0896c53167410c62154cbc0187feec1d 2024-12-07T17:25:09,522 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportExpiredSnapshot/0896c53167410c62154cbc0187feec1d 2024-12-07T17:25:09,524 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(1085): writing seq id for 0896c53167410c62154cbc0187feec1d 2024-12-07T17:25:09,528 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportExpiredSnapshot/0896c53167410c62154cbc0187feec1d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T17:25:09,529 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(1102): Opened 0896c53167410c62154cbc0187feec1d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74636938, jitterRate=0.1121770441532135}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T17:25:09,529 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(1001): Region open journal for 0896c53167410c62154cbc0187feec1d: 2024-12-07T17:25:09,530 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1733592309095.0896c53167410c62154cbc0187feec1d., pid=140, masterSystemTime=1733592309497 2024-12-07T17:25:09,531 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1733592309095.0896c53167410c62154cbc0187feec1d. 2024-12-07T17:25:09,531 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] handler.AssignRegionHandler(164): Opened testtb-testExportExpiredSnapshot,1,1733592309095.0896c53167410c62154cbc0187feec1d. 
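At this point both regions of testtb-testExportExpiredSnapshot have been opened (pids 139 and 140). Purely as an illustration, and not part of the test itself, the "all regions assigned" check the harness performs a little further down can be expressed against the public HBase 2.x Admin API; the helper class and method names here are made up:

import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionInfo;

// Hypothetical helper: confirm the table's two regions (split at key '1') are online.
final class RegionAssignmentCheck {
  static void assertTwoRegionsOnline(Admin admin) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
    List<RegionInfo> regions = admin.getRegions(table);
    if (regions.size() != 2) {
      throw new IllegalStateException("expected 2 regions, found " + regions.size());
    }
  }
}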
2024-12-07T17:25:09,532 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=138 updating hbase:meta row=0896c53167410c62154cbc0187feec1d, regionState=OPEN, openSeqNum=2, regionLocation=5c70517e8267,36163,1733592165482 2024-12-07T17:25:09,535 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=140, resume processing ppid=138 2024-12-07T17:25:09,535 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, ppid=138, state=SUCCESS; OpenRegionProcedure 0896c53167410c62154cbc0187feec1d, server=5c70517e8267,36163,1733592165482 in 188 msec 2024-12-07T17:25:09,536 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=138, resume processing ppid=136 2024-12-07T17:25:09,537 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, ppid=136, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=0896c53167410c62154cbc0187feec1d, ASSIGN in 347 msec 2024-12-07T17:25:09,537 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T17:25:09,537 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592309537"}]},"ts":"1733592309537"} 2024-12-07T17:25:09,539 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-07T17:25:09,542 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T17:25:09,542 DEBUG [PEWorker-1 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 2024-12-07T17:25:09,544 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36163 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-07T17:25:09,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:25:09,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:25:09,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:25:09,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:25:09,548 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot 
\x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:09,548 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:09,548 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:09,548 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:09,550 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 453 msec 2024-12-07T17:25:09,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-07T17:25:09,706 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot, procId: 136 completed 2024-12-07T17:25:09,706 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-12-07T17:25:09,706 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:25:09,710 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-12-07T17:25:09,710 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:25:09,711 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportExpiredSnapshot assigned. 2024-12-07T17:25:09,714 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-07T17:25:09,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733592309714 (current time:1733592309714). 
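The table create procedure (pid=136) has completed here and the first snapshot request arrives. For reference, a minimal sketch of the client-side calls that would produce this sequence, assuming the standard HBase 2.x client API; the table name, column family, split key and snapshot name are taken from the log, while the class name and configuration wiring are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

final class CreateAndSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
      TableDescriptor desc = TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
          .build();
      // Two regions split at '1', matching the STARTKEY/ENDKEY pairs in the log.
      admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
      // FLUSH-type snapshot of the still-empty table (pid=141 in the log).
      admin.snapshot("emptySnaptb0-testExportExpiredSnapshot", table, SnapshotType.FLUSH);
    }
  }
}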
2024-12-07T17:25:09,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T17:25:09,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-07T17:25:09,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T17:25:09,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x11830e83 to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@557f9bce 2024-12-07T17:25:09,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a03fee0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:25:09,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:25:09,723 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49120, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:25:09,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x11830e83 to 127.0.0.1:58133 2024-12-07T17:25:09,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:25:09,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1621bd4e to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3fa9f5af 2024-12-07T17:25:09,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4aa8f844, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:25:09,732 DEBUG [hconnection-0x2f6b986e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:25:09,733 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49130, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:25:09,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:25:09,736 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39180, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:25:09,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x1621bd4e to 127.0.0.1:58133 2024-12-07T17:25:09,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:25:09,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-07T17:25:09,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-07T17:25:09,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=141, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-07T17:25:09,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 141 2024-12-07T17:25:09,740 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T17:25:09,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-07T17:25:09,741 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T17:25:09,744 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T17:25:09,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742167_1343 (size=170) 2024-12-07T17:25:09,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742167_1343 (size=170) 2024-12-07T17:25:09,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742167_1343 (size=170) 2024-12-07T17:25:09,758 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T17:25:09,758 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=142, ppid=141, state=RUNNABLE; SnapshotRegionProcedure 3fd7e64b6fc687219db67183a42fd845}, {pid=143, ppid=141, state=RUNNABLE; 
SnapshotRegionProcedure 0896c53167410c62154cbc0187feec1d}] 2024-12-07T17:25:09,759 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=142, ppid=141, state=RUNNABLE; SnapshotRegionProcedure 3fd7e64b6fc687219db67183a42fd845 2024-12-07T17:25:09,759 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=143, ppid=141, state=RUNNABLE; SnapshotRegionProcedure 0896c53167410c62154cbc0187feec1d 2024-12-07T17:25:09,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-07T17:25:09,910 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5c70517e8267,41313,1733592165267 2024-12-07T17:25:09,910 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5c70517e8267,36163,1733592165482 2024-12-07T17:25:09,911 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36163 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=143 2024-12-07T17:25:09,911 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41313 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=142 2024-12-07T17:25:09,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1733592309095.0896c53167410c62154cbc0187feec1d. 2024-12-07T17:25:09,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1733592309095.3fd7e64b6fc687219db67183a42fd845. 2024-12-07T17:25:09,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for 0896c53167410c62154cbc0187feec1d: 2024-12-07T17:25:09,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2538): Flush status journal for 3fd7e64b6fc687219db67183a42fd845: 2024-12-07T17:25:09,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1733592309095.0896c53167410c62154cbc0187feec1d. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-07T17:25:09,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1733592309095.3fd7e64b6fc687219db67183a42fd845. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-07T17:25:09,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1733592309095.0896c53167410c62154cbc0187feec1d.' 
region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-07T17:25:09,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:25:09,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1733592309095.3fd7e64b6fc687219db67183a42fd845.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-07T17:25:09,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T17:25:09,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:25:09,912 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T17:25:09,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742168_1344 (size=71) 2024-12-07T17:25:09,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742168_1344 (size=71) 2024-12-07T17:25:09,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742168_1344 (size=71) 2024-12-07T17:25:09,936 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1733592309095.0896c53167410c62154cbc0187feec1d. 
2024-12-07T17:25:09,936 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-12-07T17:25:09,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-12-07T17:25:09,937 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 0896c53167410c62154cbc0187feec1d 2024-12-07T17:25:09,937 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=143, ppid=141, state=RUNNABLE; SnapshotRegionProcedure 0896c53167410c62154cbc0187feec1d 2024-12-07T17:25:09,939 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=141, state=SUCCESS; SnapshotRegionProcedure 0896c53167410c62154cbc0187feec1d in 180 msec 2024-12-07T17:25:09,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742169_1345 (size=71) 2024-12-07T17:25:09,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742169_1345 (size=71) 2024-12-07T17:25:09,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742169_1345 (size=71) 2024-12-07T17:25:09,947 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1733592309095.3fd7e64b6fc687219db67183a42fd845. 
2024-12-07T17:25:09,947 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=142 2024-12-07T17:25:09,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=142 2024-12-07T17:25:09,948 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 3fd7e64b6fc687219db67183a42fd845 2024-12-07T17:25:09,948 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=142, ppid=141, state=RUNNABLE; SnapshotRegionProcedure 3fd7e64b6fc687219db67183a42fd845 2024-12-07T17:25:09,951 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=142, resume processing ppid=141 2024-12-07T17:25:09,951 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, ppid=141, state=SUCCESS; SnapshotRegionProcedure 3fd7e64b6fc687219db67183a42fd845 in 191 msec 2024-12-07T17:25:09,951 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T17:25:09,952 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T17:25:09,953 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T17:25:09,953 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-12-07T17:25:09,954 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-12-07T17:25:09,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742170_1346 (size=552) 2024-12-07T17:25:09,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742170_1346 (size=552) 2024-12-07T17:25:09,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742170_1346 (size=552) 2024-12-07T17:25:09,990 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T17:25:09,995 INFO 
[PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T17:25:09,995 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-12-07T17:25:09,997 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T17:25:09,997 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 141 2024-12-07T17:25:09,998 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 259 msec 2024-12-07T17:25:10,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-07T17:25:10,043 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot, procId: 141 completed 2024-12-07T17:25:10,060 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41313 {}] regionserver.HRegion(8254): writing data to region testtb-testExportExpiredSnapshot,,1733592309095.3fd7e64b6fc687219db67183a42fd845. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T17:25:10,061 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36163 {}] regionserver.HRegion(8254): writing data to region testtb-testExportExpiredSnapshot,1,1733592309095.0896c53167410c62154cbc0187feec1d. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T17:25:10,076 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-12-07T17:25:10,076 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportExpiredSnapshot,,1733592309095.3fd7e64b6fc687219db67183a42fd845. 2024-12-07T17:25:10,076 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:25:10,089 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-07T17:25:10,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733592310089 (current time:1733592310089). 
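The two "writing data to region ... with WAL disabled" lines above are the test loading rows before the second snapshot is requested; the region server prints that notice whenever a mutation arrives with WAL durability skipped. A sketch of such a write from the client side, assuming the HBase 2.x client API; the row, qualifier and value are placeholders, only the durability setting matters:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

final class SkipWalPutSketch {
  static void writeRow(Connection conn) throws Exception {
    TableName tableName = TableName.valueOf("testtb-testExportExpiredSnapshot");
    try (Table table = conn.getTable(tableName)) {
      Put put = new Put(Bytes.toBytes("row-0"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      put.setDurability(Durability.SKIP_WAL); // produces the server-side "WAL disabled" notice
      table.put(put);
    }
  }
}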
2024-12-07T17:25:10,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T17:25:10,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-07T17:25:10,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T17:25:10,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4928aeb0 to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3af3f713 2024-12-07T17:25:10,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@527818d6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:25:10,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:25:10,096 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49132, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:25:10,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4928aeb0 to 127.0.0.1:58133 2024-12-07T17:25:10,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:25:10,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x62ffbacb to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@479182e8 2024-12-07T17:25:10,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@55bd8c64, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:25:10,103 DEBUG [hconnection-0x4dafc807-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:25:10,104 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49140, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:25:10,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:25:10,107 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39186, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:25:10,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x62ffbacb to 127.0.0.1:58133 2024-12-07T17:25:10,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:25:10,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-07T17:25:10,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-07T17:25:10,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-07T17:25:10,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 144 2024-12-07T17:25:10,110 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T17:25:10,112 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T17:25:10,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-07T17:25:10,114 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T17:25:10,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742171_1347 (size=165) 2024-12-07T17:25:10,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742171_1347 (size=165) 2024-12-07T17:25:10,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742171_1347 (size=165) 2024-12-07T17:25:10,126 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T17:25:10,126 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; SnapshotRegionProcedure 3fd7e64b6fc687219db67183a42fd845}, {pid=146, ppid=144, state=RUNNABLE; SnapshotRegionProcedure 
0896c53167410c62154cbc0187feec1d}] 2024-12-07T17:25:10,127 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=144, state=RUNNABLE; SnapshotRegionProcedure 0896c53167410c62154cbc0187feec1d 2024-12-07T17:25:10,127 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=145, ppid=144, state=RUNNABLE; SnapshotRegionProcedure 3fd7e64b6fc687219db67183a42fd845 2024-12-07T17:25:10,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-07T17:25:10,278 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5c70517e8267,36163,1733592165482 2024-12-07T17:25:10,278 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5c70517e8267,41313,1733592165267 2024-12-07T17:25:10,279 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41313 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=145 2024-12-07T17:25:10,279 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36163 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=146 2024-12-07T17:25:10,279 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1733592309095.0896c53167410c62154cbc0187feec1d. 2024-12-07T17:25:10,279 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1733592309095.3fd7e64b6fc687219db67183a42fd845. 
2024-12-07T17:25:10,279 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegion(2837): Flushing 0896c53167410c62154cbc0187feec1d 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-07T17:25:10,279 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegion(2837): Flushing 3fd7e64b6fc687219db67183a42fd845 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-07T17:25:10,298 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportExpiredSnapshot/0896c53167410c62154cbc0187feec1d/.tmp/cf/e37a2b4d385d4db3a3bba45353db35d8 is 71, key is 10870794d6bf1d331dd5714fbf3545d3/cf:q/1733592310061/Put/seqid=0 2024-12-07T17:25:10,303 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportExpiredSnapshot/3fd7e64b6fc687219db67183a42fd845/.tmp/cf/dccbcd6bc78e4b5a9abe6a9338e29e1f is 71, key is 0ec3719ea370329e68ea6f3f42d65a93/cf:q/1733592310060/Put/seqid=0 2024-12-07T17:25:10,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742172_1348 (size=8392) 2024-12-07T17:25:10,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742172_1348 (size=8392) 2024-12-07T17:25:10,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742172_1348 (size=8392) 2024-12-07T17:25:10,322 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportExpiredSnapshot/0896c53167410c62154cbc0187feec1d/.tmp/cf/e37a2b4d385d4db3a3bba45353db35d8 2024-12-07T17:25:10,339 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportExpiredSnapshot/0896c53167410c62154cbc0187feec1d/.tmp/cf/e37a2b4d385d4db3a3bba45353db35d8 as hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportExpiredSnapshot/0896c53167410c62154cbc0187feec1d/cf/e37a2b4d385d4db3a3bba45353db35d8 2024-12-07T17:25:10,356 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportExpiredSnapshot/0896c53167410c62154cbc0187feec1d/cf/e37a2b4d385d4db3a3bba45353db35d8, entries=48, sequenceid=6, filesize=8.2 K 2024-12-07T17:25:10,358 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegion(3040): Finished flush 
of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 0896c53167410c62154cbc0187feec1d in 78ms, sequenceid=6, compaction requested=false 2024-12-07T17:25:10,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-12-07T17:25:10,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegion(2538): Flush status journal for 0896c53167410c62154cbc0187feec1d: 2024-12-07T17:25:10,359 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1733592309095.0896c53167410c62154cbc0187feec1d. for snaptb0-testExportExpiredSnapshot completed. 2024-12-07T17:25:10,359 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1733592309095.0896c53167410c62154cbc0187feec1d.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-07T17:25:10,359 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:25:10,359 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportExpiredSnapshot/0896c53167410c62154cbc0187feec1d/cf/e37a2b4d385d4db3a3bba45353db35d8] hfiles 2024-12-07T17:25:10,359 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportExpiredSnapshot/0896c53167410c62154cbc0187feec1d/cf/e37a2b4d385d4db3a3bba45353db35d8 for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-07T17:25:10,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742173_1349 (size=5216) 2024-12-07T17:25:10,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742173_1349 (size=5216) 2024-12-07T17:25:10,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742173_1349 (size=5216) 2024-12-07T17:25:10,362 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportExpiredSnapshot/3fd7e64b6fc687219db67183a42fd845/.tmp/cf/dccbcd6bc78e4b5a9abe6a9338e29e1f 2024-12-07T17:25:10,370 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportExpiredSnapshot/3fd7e64b6fc687219db67183a42fd845/.tmp/cf/dccbcd6bc78e4b5a9abe6a9338e29e1f as hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportExpiredSnapshot/3fd7e64b6fc687219db67183a42fd845/cf/dccbcd6bc78e4b5a9abe6a9338e29e1f 2024-12-07T17:25:10,378 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportExpiredSnapshot/3fd7e64b6fc687219db67183a42fd845/cf/dccbcd6bc78e4b5a9abe6a9338e29e1f, entries=2, sequenceid=6, filesize=5.1 K 2024-12-07T17:25:10,379 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegion(3040): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 3fd7e64b6fc687219db67183a42fd845 in 100ms, sequenceid=6, compaction requested=false 2024-12-07T17:25:10,379 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for 3fd7e64b6fc687219db67183a42fd845: 2024-12-07T17:25:10,379 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1733592309095.3fd7e64b6fc687219db67183a42fd845. for snaptb0-testExportExpiredSnapshot completed. 2024-12-07T17:25:10,379 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1733592309095.3fd7e64b6fc687219db67183a42fd845.' 
region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-07T17:25:10,379 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:25:10,379 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportExpiredSnapshot/3fd7e64b6fc687219db67183a42fd845/cf/dccbcd6bc78e4b5a9abe6a9338e29e1f] hfiles 2024-12-07T17:25:10,379 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportExpiredSnapshot/3fd7e64b6fc687219db67183a42fd845/cf/dccbcd6bc78e4b5a9abe6a9338e29e1f for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-07T17:25:10,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742174_1350 (size=110) 2024-12-07T17:25:10,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742174_1350 (size=110) 2024-12-07T17:25:10,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742174_1350 (size=110) 2024-12-07T17:25:10,394 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1733592309095.0896c53167410c62154cbc0187feec1d. 
2024-12-07T17:25:10,395 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=146 2024-12-07T17:25:10,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=146 2024-12-07T17:25:10,395 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 0896c53167410c62154cbc0187feec1d 2024-12-07T17:25:10,395 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=144, state=RUNNABLE; SnapshotRegionProcedure 0896c53167410c62154cbc0187feec1d 2024-12-07T17:25:10,397 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, ppid=144, state=SUCCESS; SnapshotRegionProcedure 0896c53167410c62154cbc0187feec1d in 270 msec 2024-12-07T17:25:10,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-07T17:25:10,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742175_1351 (size=110) 2024-12-07T17:25:10,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742175_1351 (size=110) 2024-12-07T17:25:10,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742175_1351 (size=110) 2024-12-07T17:25:10,424 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1733592309095.3fd7e64b6fc687219db67183a42fd845. 
2024-12-07T17:25:10,424 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145 2024-12-07T17:25:10,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=145 2024-12-07T17:25:10,425 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 3fd7e64b6fc687219db67183a42fd845 2024-12-07T17:25:10,425 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=145, ppid=144, state=RUNNABLE; SnapshotRegionProcedure 3fd7e64b6fc687219db67183a42fd845 2024-12-07T17:25:10,427 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-12-07T17:25:10,427 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; SnapshotRegionProcedure 3fd7e64b6fc687219db67183a42fd845 in 300 msec 2024-12-07T17:25:10,427 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T17:25:10,428 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T17:25:10,429 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T17:25:10,429 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-12-07T17:25:10,429 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-12-07T17:25:10,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742176_1352 (size=630) 2024-12-07T17:25:10,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742176_1352 (size=630) 2024-12-07T17:25:10,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742176_1352 (size=630) 2024-12-07T17:25:10,450 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T17:25:10,458 INFO [PEWorker-4 {}] 
procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T17:25:10,458 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-12-07T17:25:10,459 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T17:25:10,460 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 144 2024-12-07T17:25:10,461 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 351 msec 2024-12-07T17:25:10,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-07T17:25:10,716 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot, procId: 144 completed 2024-12-07T17:25:10,717 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T17:25:10,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=147, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testExportExpiredSnapshot 2024-12-07T17:25:10,719 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T17:25:10,719 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:25:10,719 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 147 2024-12-07T17:25:10,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-07T17:25:10,724 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T17:25:10,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742177_1353 (size=400) 2024-12-07T17:25:10,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742177_1353 (size=400) 2024-12-07T17:25:10,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742177_1353 (size=400) 2024-12-07T17:25:10,738 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 0138678f1a360d7573c5cfe5f7d0a7e0, NAME => 'testExportExpiredSnapshot,1,1733592310717.0138678f1a360d7573c5cfe5f7d0a7e0.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:25:10,738 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 95a53dd0f0fa93ab4e6bebd2e0c2e9c5, NAME => 'testExportExpiredSnapshot,,1733592310717.95a53dd0f0fa93ab4e6bebd2e0c2e9c5.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:25:10,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742178_1354 (size=61) 2024-12-07T17:25:10,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742178_1354 (size=61) 2024-12-07T17:25:10,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742178_1354 (size=61) 2024-12-07T17:25:10,765 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,1,1733592310717.0138678f1a360d7573c5cfe5f7d0a7e0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:25:10,765 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1681): Closing 0138678f1a360d7573c5cfe5f7d0a7e0, 
disabling compactions & flushes 2024-12-07T17:25:10,765 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,1,1733592310717.0138678f1a360d7573c5cfe5f7d0a7e0. 2024-12-07T17:25:10,766 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,1,1733592310717.0138678f1a360d7573c5cfe5f7d0a7e0. 2024-12-07T17:25:10,766 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,1,1733592310717.0138678f1a360d7573c5cfe5f7d0a7e0. after waiting 0 ms 2024-12-07T17:25:10,766 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,1,1733592310717.0138678f1a360d7573c5cfe5f7d0a7e0. 2024-12-07T17:25:10,766 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,1,1733592310717.0138678f1a360d7573c5cfe5f7d0a7e0. 2024-12-07T17:25:10,766 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1635): Region close journal for 0138678f1a360d7573c5cfe5f7d0a7e0: 2024-12-07T17:25:10,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742179_1355 (size=61) 2024-12-07T17:25:10,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742179_1355 (size=61) 2024-12-07T17:25:10,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742179_1355 (size=61) 2024-12-07T17:25:10,775 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,,1733592310717.95a53dd0f0fa93ab4e6bebd2e0c2e9c5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:25:10,775 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1681): Closing 95a53dd0f0fa93ab4e6bebd2e0c2e9c5, disabling compactions & flushes 2024-12-07T17:25:10,775 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,,1733592310717.95a53dd0f0fa93ab4e6bebd2e0c2e9c5. 2024-12-07T17:25:10,775 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,,1733592310717.95a53dd0f0fa93ab4e6bebd2e0c2e9c5. 2024-12-07T17:25:10,775 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,,1733592310717.95a53dd0f0fa93ab4e6bebd2e0c2e9c5. after waiting 0 ms 2024-12-07T17:25:10,775 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,,1733592310717.95a53dd0f0fa93ab4e6bebd2e0c2e9c5. 2024-12-07T17:25:10,775 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,,1733592310717.95a53dd0f0fa93ab4e6bebd2e0c2e9c5. 
2024-12-07T17:25:10,775 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1635): Region close journal for 95a53dd0f0fa93ab4e6bebd2e0c2e9c5: 2024-12-07T17:25:10,777 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T17:25:10,777 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1733592310717.0138678f1a360d7573c5cfe5f7d0a7e0.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1733592310777"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733592310777"}]},"ts":"1733592310777"} 2024-12-07T17:25:10,777 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1733592310717.95a53dd0f0fa93ab4e6bebd2e0c2e9c5.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1733592310777"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733592310777"}]},"ts":"1733592310777"} 2024-12-07T17:25:10,779 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-07T17:25:10,780 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T17:25:10,780 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592310780"}]},"ts":"1733592310780"} 2024-12-07T17:25:10,784 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-07T17:25:10,788 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(202): Hosts are {5c70517e8267=0} racks are {/default-rack=0} 2024-12-07T17:25:10,790 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-07T17:25:10,790 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-07T17:25:10,790 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-07T17:25:10,790 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-07T17:25:10,790 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-07T17:25:10,790 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-07T17:25:10,790 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T17:25:10,790 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=95a53dd0f0fa93ab4e6bebd2e0c2e9c5, ASSIGN}, {pid=149, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=0138678f1a360d7573c5cfe5f7d0a7e0, ASSIGN}] 2024-12-07T17:25:10,791 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=149, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, 
region=0138678f1a360d7573c5cfe5f7d0a7e0, ASSIGN 2024-12-07T17:25:10,791 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=95a53dd0f0fa93ab4e6bebd2e0c2e9c5, ASSIGN 2024-12-07T17:25:10,792 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=149, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=0138678f1a360d7573c5cfe5f7d0a7e0, ASSIGN; state=OFFLINE, location=5c70517e8267,41313,1733592165267; forceNewPlan=false, retain=false 2024-12-07T17:25:10,792 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=95a53dd0f0fa93ab4e6bebd2e0c2e9c5, ASSIGN; state=OFFLINE, location=5c70517e8267,36163,1733592165482; forceNewPlan=false, retain=false 2024-12-07T17:25:10,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-07T17:25:10,943 INFO [5c70517e8267:36895 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-07T17:25:10,943 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=149 updating hbase:meta row=0138678f1a360d7573c5cfe5f7d0a7e0, regionState=OPENING, regionLocation=5c70517e8267,41313,1733592165267 2024-12-07T17:25:10,943 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=95a53dd0f0fa93ab4e6bebd2e0c2e9c5, regionState=OPENING, regionLocation=5c70517e8267,36163,1733592165482 2024-12-07T17:25:10,945 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=150, ppid=149, state=RUNNABLE; OpenRegionProcedure 0138678f1a360d7573c5cfe5f7d0a7e0, server=5c70517e8267,41313,1733592165267}] 2024-12-07T17:25:10,947 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=148, state=RUNNABLE; OpenRegionProcedure 95a53dd0f0fa93ab4e6bebd2e0c2e9c5, server=5c70517e8267,36163,1733592165482}] 2024-12-07T17:25:11,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-07T17:25:11,098 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5c70517e8267,41313,1733592165267 2024-12-07T17:25:11,099 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5c70517e8267,36163,1733592165482 2024-12-07T17:25:11,102 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] handler.AssignRegionHandler(135): Open testExportExpiredSnapshot,1,1733592310717.0138678f1a360d7573c5cfe5f7d0a7e0. 
2024-12-07T17:25:11,102 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7285): Opening region: {ENCODED => 0138678f1a360d7573c5cfe5f7d0a7e0, NAME => 'testExportExpiredSnapshot,1,1733592310717.0138678f1a360d7573c5cfe5f7d0a7e0.', STARTKEY => '1', ENDKEY => ''} 2024-12-07T17:25:11,102 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] handler.AssignRegionHandler(135): Open testExportExpiredSnapshot,,1733592310717.95a53dd0f0fa93ab4e6bebd2e0c2e9c5. 2024-12-07T17:25:11,102 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportExpiredSnapshot,1,1733592310717.0138678f1a360d7573c5cfe5f7d0a7e0. service=AccessControlService 2024-12-07T17:25:11,102 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7285): Opening region: {ENCODED => 95a53dd0f0fa93ab4e6bebd2e0c2e9c5, NAME => 'testExportExpiredSnapshot,,1733592310717.95a53dd0f0fa93ab4e6bebd2e0c2e9c5.', STARTKEY => '', ENDKEY => '1'} 2024-12-07T17:25:11,102 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T17:25:11,102 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportExpiredSnapshot,,1733592310717.95a53dd0f0fa93ab4e6bebd2e0c2e9c5. service=AccessControlService 2024-12-07T17:25:11,103 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 0138678f1a360d7573c5cfe5f7d0a7e0 2024-12-07T17:25:11,103 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,1,1733592310717.0138678f1a360d7573c5cfe5f7d0a7e0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:25:11,103 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-07T17:25:11,103 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7327): checking encryption for 0138678f1a360d7573c5cfe5f7d0a7e0 2024-12-07T17:25:11,103 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7330): checking classloading for 0138678f1a360d7573c5cfe5f7d0a7e0 2024-12-07T17:25:11,103 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 95a53dd0f0fa93ab4e6bebd2e0c2e9c5 2024-12-07T17:25:11,103 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,,1733592310717.95a53dd0f0fa93ab4e6bebd2e0c2e9c5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:25:11,103 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7327): checking encryption for 95a53dd0f0fa93ab4e6bebd2e0c2e9c5 2024-12-07T17:25:11,103 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7330): checking classloading for 95a53dd0f0fa93ab4e6bebd2e0c2e9c5 2024-12-07T17:25:11,104 INFO [StoreOpener-0138678f1a360d7573c5cfe5f7d0a7e0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0138678f1a360d7573c5cfe5f7d0a7e0 2024-12-07T17:25:11,105 INFO [StoreOpener-95a53dd0f0fa93ab4e6bebd2e0c2e9c5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 95a53dd0f0fa93ab4e6bebd2e0c2e9c5 2024-12-07T17:25:11,106 INFO [StoreOpener-95a53dd0f0fa93ab4e6bebd2e0c2e9c5-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 95a53dd0f0fa93ab4e6bebd2e0c2e9c5 columnFamilyName cf 2024-12-07T17:25:11,106 INFO [StoreOpener-0138678f1a360d7573c5cfe5f7d0a7e0-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single 
output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0138678f1a360d7573c5cfe5f7d0a7e0 columnFamilyName cf 2024-12-07T17:25:11,106 DEBUG [StoreOpener-95a53dd0f0fa93ab4e6bebd2e0c2e9c5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:25:11,106 DEBUG [StoreOpener-0138678f1a360d7573c5cfe5f7d0a7e0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:25:11,106 INFO [StoreOpener-0138678f1a360d7573c5cfe5f7d0a7e0-1 {}] regionserver.HStore(327): Store=0138678f1a360d7573c5cfe5f7d0a7e0/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:25:11,106 INFO [StoreOpener-95a53dd0f0fa93ab4e6bebd2e0c2e9c5-1 {}] regionserver.HStore(327): Store=95a53dd0f0fa93ab4e6bebd2e0c2e9c5/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:25:11,107 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportExpiredSnapshot/95a53dd0f0fa93ab4e6bebd2e0c2e9c5 2024-12-07T17:25:11,107 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportExpiredSnapshot/0138678f1a360d7573c5cfe5f7d0a7e0 2024-12-07T17:25:11,108 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportExpiredSnapshot/0138678f1a360d7573c5cfe5f7d0a7e0 2024-12-07T17:25:11,108 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportExpiredSnapshot/95a53dd0f0fa93ab4e6bebd2e0c2e9c5 2024-12-07T17:25:11,110 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1085): writing seq id for 0138678f1a360d7573c5cfe5f7d0a7e0 2024-12-07T17:25:11,110 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1085): writing seq id for 95a53dd0f0fa93ab4e6bebd2e0c2e9c5 2024-12-07T17:25:11,112 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportExpiredSnapshot/0138678f1a360d7573c5cfe5f7d0a7e0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T17:25:11,112 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportExpiredSnapshot/95a53dd0f0fa93ab4e6bebd2e0c2e9c5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T17:25:11,113 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1102): Opened 95a53dd0f0fa93ab4e6bebd2e0c2e9c5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63706820, jitterRate=-0.050694406032562256}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T17:25:11,113 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1102): Opened 0138678f1a360d7573c5cfe5f7d0a7e0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75085463, jitterRate=0.11886058747768402}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T17:25:11,114 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1001): Region open journal for 95a53dd0f0fa93ab4e6bebd2e0c2e9c5: 2024-12-07T17:25:11,114 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1001): Region open journal for 0138678f1a360d7573c5cfe5f7d0a7e0: 2024-12-07T17:25:11,115 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportExpiredSnapshot,,1733592310717.95a53dd0f0fa93ab4e6bebd2e0c2e9c5., pid=151, masterSystemTime=1733592311099 2024-12-07T17:25:11,115 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportExpiredSnapshot,1,1733592310717.0138678f1a360d7573c5cfe5f7d0a7e0., pid=150, masterSystemTime=1733592311098 2024-12-07T17:25:11,116 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportExpiredSnapshot,,1733592310717.95a53dd0f0fa93ab4e6bebd2e0c2e9c5. 2024-12-07T17:25:11,116 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] handler.AssignRegionHandler(164): Opened testExportExpiredSnapshot,,1733592310717.95a53dd0f0fa93ab4e6bebd2e0c2e9c5. 2024-12-07T17:25:11,117 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=95a53dd0f0fa93ab4e6bebd2e0c2e9c5, regionState=OPEN, openSeqNum=2, regionLocation=5c70517e8267,36163,1733592165482 2024-12-07T17:25:11,118 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportExpiredSnapshot,1,1733592310717.0138678f1a360d7573c5cfe5f7d0a7e0. 2024-12-07T17:25:11,118 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] handler.AssignRegionHandler(164): Opened testExportExpiredSnapshot,1,1733592310717.0138678f1a360d7573c5cfe5f7d0a7e0. 
2024-12-07T17:25:11,118 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=149 updating hbase:meta row=0138678f1a360d7573c5cfe5f7d0a7e0, regionState=OPEN, openSeqNum=2, regionLocation=5c70517e8267,41313,1733592165267 2024-12-07T17:25:11,120 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=148 2024-12-07T17:25:11,120 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=148, state=SUCCESS; OpenRegionProcedure 95a53dd0f0fa93ab4e6bebd2e0c2e9c5, server=5c70517e8267,36163,1733592165482 in 171 msec 2024-12-07T17:25:11,121 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=150, resume processing ppid=149 2024-12-07T17:25:11,122 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, ppid=149, state=SUCCESS; OpenRegionProcedure 0138678f1a360d7573c5cfe5f7d0a7e0, server=5c70517e8267,41313,1733592165267 in 175 msec 2024-12-07T17:25:11,122 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, ppid=147, state=SUCCESS; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=95a53dd0f0fa93ab4e6bebd2e0c2e9c5, ASSIGN in 330 msec 2024-12-07T17:25:11,123 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=147 2024-12-07T17:25:11,123 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=147, state=SUCCESS; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=0138678f1a360d7573c5cfe5f7d0a7e0, ASSIGN in 332 msec 2024-12-07T17:25:11,124 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T17:25:11,124 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592311124"}]},"ts":"1733592311124"} 2024-12-07T17:25:11,125 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-07T17:25:11,128 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T17:25:11,128 DEBUG [PEWorker-3 {}] access.PermissionStorage(175): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-12-07T17:25:11,130 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36163 {}] access.PermissionStorage(611): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-07T17:25:11,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:25:11,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:25:11,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/acl 2024-12-07T17:25:11,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:25:11,134 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:11,134 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:11,134 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:11,134 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:11,134 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:11,134 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:11,135 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:11,135 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:11,136 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, state=SUCCESS; CreateTableProcedure table=testExportExpiredSnapshot in 416 msec 2024-12-07T17:25:11,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-07T17:25:11,324 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testExportExpiredSnapshot, procId: 147 completed 2024-12-07T17:25:11,324 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testExportExpiredSnapshot get assigned. 
Timeout = 60000ms 2024-12-07T17:25:11,325 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:25:11,329 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-12-07T17:25:11,329 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:25:11,329 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testExportExpiredSnapshot assigned. 2024-12-07T17:25:11,345 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36163 {}] regionserver.HRegion(8254): writing data to region testExportExpiredSnapshot,,1733592310717.95a53dd0f0fa93ab4e6bebd2e0c2e9c5. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T17:25:11,355 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41313 {}] regionserver.HRegion(8254): writing data to region testExportExpiredSnapshot,1,1733592310717.0138678f1a360d7573c5cfe5f7d0a7e0. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T17:25:11,359 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testExportExpiredSnapshot 2024-12-07T17:25:11,359 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testExportExpiredSnapshot,,1733592310717.95a53dd0f0fa93ab4e6bebd2e0c2e9c5. 2024-12-07T17:25:11,360 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:25:11,371 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-07T17:25:11,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-07T17:25:11,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T17:25:11,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x09b833c5 to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@32701544 2024-12-07T17:25:11,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bd2ad3f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:25:11,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:25:11,379 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49156, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:25:11,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x09b833c5 to 127.0.0.1:58133 2024-12-07T17:25:11,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 
{}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:25:11,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x29ce23f8 to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1b26d9b0 2024-12-07T17:25:11,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58324be, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:25:11,387 DEBUG [hconnection-0x6452860e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:25:11,388 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49158, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:25:11,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:25:11,391 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39190, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:25:11,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x29ce23f8 to 127.0.0.1:58133 2024-12-07T17:25:11,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:25:11,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] access.PermissionStorage(611): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-07T17:25:11,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
2024-12-07T17:25:11,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=152, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-07T17:25:11,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 152 2024-12-07T17:25:11,395 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-12-07T17:25:11,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-07T17:25:11,396 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T17:25:11,399 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T17:25:11,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742180_1356 (size=152) 2024-12-07T17:25:11,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742180_1356 (size=152) 2024-12-07T17:25:11,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742180_1356 (size=152) 2024-12-07T17:25:11,409 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T17:25:11,409 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 95a53dd0f0fa93ab4e6bebd2e0c2e9c5}, {pid=154, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 0138678f1a360d7573c5cfe5f7d0a7e0}] 2024-12-07T17:25:11,410 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=154, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 0138678f1a360d7573c5cfe5f7d0a7e0 2024-12-07T17:25:11,410 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=153, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 95a53dd0f0fa93ab4e6bebd2e0c2e9c5 2024-12-07T17:25:11,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done 
pid=152 2024-12-07T17:25:11,561 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5c70517e8267,41313,1733592165267 2024-12-07T17:25:11,561 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5c70517e8267,36163,1733592165482 2024-12-07T17:25:11,562 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41313 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=154 2024-12-07T17:25:11,562 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36163 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=153 2024-12-07T17:25:11,562 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1733592310717.95a53dd0f0fa93ab4e6bebd2e0c2e9c5. 2024-12-07T17:25:11,562 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1733592310717.0138678f1a360d7573c5cfe5f7d0a7e0. 2024-12-07T17:25:11,563 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegion(2837): Flushing 95a53dd0f0fa93ab4e6bebd2e0c2e9c5 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-07T17:25:11,563 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegion(2837): Flushing 0138678f1a360d7573c5cfe5f7d0a7e0 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-07T17:25:11,580 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportExpiredSnapshot/95a53dd0f0fa93ab4e6bebd2e0c2e9c5/.tmp/cf/82e7d0c2cc614971be7e84f9d0c5a482 is 71, key is 0d68f2b60bb135b8e61a98b05b1b67d5/cf:q/1733592311345/Put/seqid=0 2024-12-07T17:25:11,585 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportExpiredSnapshot/0138678f1a360d7573c5cfe5f7d0a7e0/.tmp/cf/9c73776cd8774b0a9c8d2ea751d71aa5 is 71, key is 1de9834277045cf029fa68dea908b143/cf:q/1733592311355/Put/seqid=0 2024-12-07T17:25:11,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742181_1357 (size=5216) 2024-12-07T17:25:11,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742181_1357 (size=5216) 2024-12-07T17:25:11,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742181_1357 (size=5216) 2024-12-07T17:25:11,586 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=5 (bloomFilter=true), 
to=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportExpiredSnapshot/95a53dd0f0fa93ab4e6bebd2e0c2e9c5/.tmp/cf/82e7d0c2cc614971be7e84f9d0c5a482 2024-12-07T17:25:11,591 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportExpiredSnapshot/95a53dd0f0fa93ab4e6bebd2e0c2e9c5/.tmp/cf/82e7d0c2cc614971be7e84f9d0c5a482 as hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportExpiredSnapshot/95a53dd0f0fa93ab4e6bebd2e0c2e9c5/cf/82e7d0c2cc614971be7e84f9d0c5a482 2024-12-07T17:25:11,596 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportExpiredSnapshot/95a53dd0f0fa93ab4e6bebd2e0c2e9c5/cf/82e7d0c2cc614971be7e84f9d0c5a482, entries=2, sequenceid=5, filesize=5.1 K 2024-12-07T17:25:11,597 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegion(3040): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 95a53dd0f0fa93ab4e6bebd2e0c2e9c5 in 34ms, sequenceid=5, compaction requested=false 2024-12-07T17:25:11,597 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-12-07T17:25:11,597 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegion(2538): Flush status journal for 95a53dd0f0fa93ab4e6bebd2e0c2e9c5: 2024-12-07T17:25:11,597 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1733592310717.95a53dd0f0fa93ab4e6bebd2e0c2e9c5. for snapshot-testExportExpiredSnapshot completed. 2024-12-07T17:25:11,598 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1733592310717.95a53dd0f0fa93ab4e6bebd2e0c2e9c5.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-07T17:25:11,598 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:25:11,598 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportExpiredSnapshot/95a53dd0f0fa93ab4e6bebd2e0c2e9c5/cf/82e7d0c2cc614971be7e84f9d0c5a482] hfiles 2024-12-07T17:25:11,598 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportExpiredSnapshot/95a53dd0f0fa93ab4e6bebd2e0c2e9c5/cf/82e7d0c2cc614971be7e84f9d0c5a482 for snapshot=snapshot-testExportExpiredSnapshot 2024-12-07T17:25:11,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742182_1358 (size=8394) 2024-12-07T17:25:11,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742182_1358 (size=8394) 2024-12-07T17:25:11,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742182_1358 (size=8394) 2024-12-07T17:25:11,599 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportExpiredSnapshot/0138678f1a360d7573c5cfe5f7d0a7e0/.tmp/cf/9c73776cd8774b0a9c8d2ea751d71aa5 2024-12-07T17:25:11,604 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportExpiredSnapshot/0138678f1a360d7573c5cfe5f7d0a7e0/.tmp/cf/9c73776cd8774b0a9c8d2ea751d71aa5 as hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportExpiredSnapshot/0138678f1a360d7573c5cfe5f7d0a7e0/cf/9c73776cd8774b0a9c8d2ea751d71aa5 2024-12-07T17:25:11,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742183_1359 (size=103) 2024-12-07T17:25:11,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742183_1359 (size=103) 2024-12-07T17:25:11,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742183_1359 (size=103) 2024-12-07T17:25:11,605 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1733592310717.95a53dd0f0fa93ab4e6bebd2e0c2e9c5. 
2024-12-07T17:25:11,605 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=153 2024-12-07T17:25:11,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=153 2024-12-07T17:25:11,606 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 95a53dd0f0fa93ab4e6bebd2e0c2e9c5 2024-12-07T17:25:11,606 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=153, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 95a53dd0f0fa93ab4e6bebd2e0c2e9c5 2024-12-07T17:25:11,607 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; SnapshotRegionProcedure 95a53dd0f0fa93ab4e6bebd2e0c2e9c5 in 197 msec 2024-12-07T17:25:11,611 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportExpiredSnapshot/0138678f1a360d7573c5cfe5f7d0a7e0/cf/9c73776cd8774b0a9c8d2ea751d71aa5, entries=48, sequenceid=5, filesize=8.2 K 2024-12-07T17:25:11,612 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegion(3040): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 0138678f1a360d7573c5cfe5f7d0a7e0 in 50ms, sequenceid=5, compaction requested=false 2024-12-07T17:25:11,612 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegion(2538): Flush status journal for 0138678f1a360d7573c5cfe5f7d0a7e0: 2024-12-07T17:25:11,612 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,1,1733592310717.0138678f1a360d7573c5cfe5f7d0a7e0. for snapshot-testExportExpiredSnapshot completed. 2024-12-07T17:25:11,612 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1733592310717.0138678f1a360d7573c5cfe5f7d0a7e0.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-07T17:25:11,612 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:25:11,612 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportExpiredSnapshot/0138678f1a360d7573c5cfe5f7d0a7e0/cf/9c73776cd8774b0a9c8d2ea751d71aa5] hfiles 2024-12-07T17:25:11,612 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportExpiredSnapshot/0138678f1a360d7573c5cfe5f7d0a7e0/cf/9c73776cd8774b0a9c8d2ea751d71aa5 for snapshot=snapshot-testExportExpiredSnapshot 2024-12-07T17:25:11,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742184_1360 (size=103) 2024-12-07T17:25:11,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742184_1360 (size=103) 2024-12-07T17:25:11,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742184_1360 (size=103) 2024-12-07T17:25:11,618 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1733592310717.0138678f1a360d7573c5cfe5f7d0a7e0. 
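The entries above show the master-side SnapshotProcedure (pid=152) fanning out one SnapshotRegionProcedure per region: each region is flushed, its HFiles are referenced in the region manifest, and the result is reported back to the master. The snapshot was requested as type=FLUSH with ttl=10 seconds. A minimal client-side sketch of issuing such a request is below; the seven-argument SnapshotDescription constructor and the "TTL" property key are assumptions based on the HBase 2.x client API, not something this log confirms.

```java
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotWithTtl {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Snapshot properties; the "TTL" key (in seconds) is an assumption based on the
      // snapshot-TTL feature, mirroring the ttl=10 recorded by the procedure above.
      Map<String, Object> props = new HashMap<>();
      props.put("TTL", 10L);
      // FLUSH-type snapshot: regions are flushed first and their HFiles referenced,
      // which is what the SnapshotRegionProcedure entries in the log record.
      admin.snapshot(new SnapshotDescription(
          "snapshot-testExportExpiredSnapshot",
          TableName.valueOf("testExportExpiredSnapshot"),
          SnapshotType.FLUSH,
          null,   // owner
          -1,     // creation time (assigned by the server)
          -1,     // manifest version (server default)
          props));
    }
  }
}
```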
2024-12-07T17:25:11,618 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=154 2024-12-07T17:25:11,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=154 2024-12-07T17:25:11,619 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 0138678f1a360d7573c5cfe5f7d0a7e0 2024-12-07T17:25:11,619 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=154, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 0138678f1a360d7573c5cfe5f7d0a7e0 2024-12-07T17:25:11,621 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=154, resume processing ppid=152 2024-12-07T17:25:11,621 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T17:25:11,621 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=152, state=SUCCESS; SnapshotRegionProcedure 0138678f1a360d7573c5cfe5f7d0a7e0 in 211 msec 2024-12-07T17:25:11,622 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T17:25:11,622 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T17:25:11,622 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot 2024-12-07T17:25:11,623 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-07T17:25:11,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742185_1361 (size=609) 2024-12-07T17:25:11,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742185_1361 (size=609) 2024-12-07T17:25:11,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742185_1361 (size=609) 2024-12-07T17:25:11,631 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T17:25:11,636 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): 
pid=152, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T17:25:11,636 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-12-07T17:25:11,637 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T17:25:11,637 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 152 2024-12-07T17:25:11,638 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 244 msec 2024-12-07T17:25:11,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-07T17:25:11,697 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot, procId: 152 completed 2024-12-07T17:25:12,269 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0006_000001 (auth:SIMPLE) from 127.0.0.1:44778 2024-12-07T17:25:12,281 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-1_1/usercache/jenkins/appcache/application_1733592173099_0006/container_1733592173099_0006_01_000001/launch_container.sh] 2024-12-07T17:25:12,281 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-1_1/usercache/jenkins/appcache/application_1733592173099_0006/container_1733592173099_0006_01_000001/container_tokens] 2024-12-07T17:25:12,281 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-1_1/usercache/jenkins/appcache/application_1733592173099_0006/container_1733592173099_0006_01_000001/sysfs] 2024-12-07T17:25:13,250 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-07T17:25:13,876 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T17:25:14,860 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-07T17:25:14,860 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-07T17:25:14,861 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-07T17:25:14,861 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-07T17:25:14,861 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T17:25:14,861 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T17:25:20,363 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T17:25:21,705 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592321705 2024-12-07T17:25:21,705 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:33765, tgtDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592321705, rawTgtDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592321705, srcFsUri=hdfs://localhost:33765, srcDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:25:21,735 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:33765, inputRoot=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:25:21,735 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1614647307_22, ugi=jenkins (auth:SIMPLE)]], 
outputRoot=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592321705, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592321705/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot
2024-12-07T17:25:21,737 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity.
2024-12-07T17:25:21,738 ERROR [Time-limited test {}] util.AbstractHBaseTool(153): Error running command-line tool
org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired.
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:948) ~[classes/:?]
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1093) ~[classes/:?]
    at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:151) ~[hbase-common-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:523) ~[test-classes/:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:315) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-07T17:25:21,739 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportExpiredSnapshot
2024-12-07T17:25:21,739 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportExpiredSnapshot
2024-12-07T17:25:21,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=155, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportExpiredSnapshot
2024-12-07T17:25:21,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155
2024-12-07T17:25:21,742 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592321741"}]},"ts":"1733592321741"}
2024-12-07T17:25:21,743 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta
2024-12-07T17:25:21,745 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING
2024-12-07T17:25:21,745 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}]
2024-12-07T17:25:21,746 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=3fd7e64b6fc687219db67183a42fd845, UNASSIGN}, {pid=158, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=0896c53167410c62154cbc0187feec1d, UNASSIGN}]
2024-12-07T17:25:21,747 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=158, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=0896c53167410c62154cbc0187feec1d, UNASSIGN
2024-12-07T17:25:21,747 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=157, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=3fd7e64b6fc687219db67183a42fd845, UNASSIGN
2024-12-07T17:25:21,748 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=0896c53167410c62154cbc0187feec1d, regionState=CLOSING, regionLocation=5c70517e8267,36163,1733592165482
2024-12-07T17:25:21,748 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=157 updating hbase:meta row=3fd7e64b6fc687219db67183a42fd845, regionState=CLOSING,
regionLocation=5c70517e8267,41313,1733592165267 2024-12-07T17:25:21,749 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T17:25:21,749 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE; CloseRegionProcedure 0896c53167410c62154cbc0187feec1d, server=5c70517e8267,36163,1733592165482}] 2024-12-07T17:25:21,749 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T17:25:21,749 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=157, state=RUNNABLE; CloseRegionProcedure 3fd7e64b6fc687219db67183a42fd845, server=5c70517e8267,41313,1733592165267}] 2024-12-07T17:25:21,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-07T17:25:21,900 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5c70517e8267,36163,1733592165482 2024-12-07T17:25:21,901 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(124): Close 0896c53167410c62154cbc0187feec1d 2024-12-07T17:25:21,901 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T17:25:21,901 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1681): Closing 0896c53167410c62154cbc0187feec1d, disabling compactions & flushes 2024-12-07T17:25:21,901 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,1,1733592309095.0896c53167410c62154cbc0187feec1d. 2024-12-07T17:25:21,901 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,1,1733592309095.0896c53167410c62154cbc0187feec1d. 2024-12-07T17:25:21,901 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,1,1733592309095.0896c53167410c62154cbc0187feec1d. after waiting 0 ms 2024-12-07T17:25:21,901 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,1,1733592309095.0896c53167410c62154cbc0187feec1d. 
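The ERROR above is the expected outcome of this test case: the snapshot was created with a 10-second TTL at 17:25:11, the export starts at 17:25:21, and ExportSnapshot.verifySnapshot rejects the expired snapshot before copying anything. The stack trace shows the tool being driven through ToolRunner, roughly as in the sketch below; the destination path and exit-code handling are illustrative assumptions.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportExpiredSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Same style of arguments the ExportSnapshot CLI accepts; the destination URI
    // here is illustrative only.
    String[] exportArgs = new String[] {
        "-snapshot", "snapshot-testExportExpiredSnapshot",
        "-copy-to", "hdfs://localhost:33765/user/jenkins/export-test/export-dest"
    };
    // When the snapshot's TTL has already elapsed, AbstractHBaseTool logs
    // "Error running command-line tool" (as seen above) and the tool returns
    // a non-zero exit code instead of copying the snapshot.
    int exitCode = ToolRunner.run(conf, new ExportSnapshot(), exportArgs);
    System.out.println("ExportSnapshot exit code: " + exitCode);
  }
}
```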
2024-12-07T17:25:21,901 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5c70517e8267,41313,1733592165267 2024-12-07T17:25:21,902 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(124): Close 3fd7e64b6fc687219db67183a42fd845 2024-12-07T17:25:21,902 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T17:25:21,902 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1681): Closing 3fd7e64b6fc687219db67183a42fd845, disabling compactions & flushes 2024-12-07T17:25:21,902 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,,1733592309095.3fd7e64b6fc687219db67183a42fd845. 2024-12-07T17:25:21,902 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,,1733592309095.3fd7e64b6fc687219db67183a42fd845. 2024-12-07T17:25:21,902 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,,1733592309095.3fd7e64b6fc687219db67183a42fd845. after waiting 0 ms 2024-12-07T17:25:21,902 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,,1733592309095.3fd7e64b6fc687219db67183a42fd845. 2024-12-07T17:25:21,905 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportExpiredSnapshot/0896c53167410c62154cbc0187feec1d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T17:25:21,905 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportExpiredSnapshot/3fd7e64b6fc687219db67183a42fd845/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T17:25:21,905 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:25:21,906 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,1,1733592309095.0896c53167410c62154cbc0187feec1d. 
2024-12-07T17:25:21,906 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1635): Region close journal for 0896c53167410c62154cbc0187feec1d: 2024-12-07T17:25:21,906 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:25:21,906 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,,1733592309095.3fd7e64b6fc687219db67183a42fd845. 2024-12-07T17:25:21,906 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1635): Region close journal for 3fd7e64b6fc687219db67183a42fd845: 2024-12-07T17:25:21,907 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(170): Closed 0896c53167410c62154cbc0187feec1d 2024-12-07T17:25:21,907 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=0896c53167410c62154cbc0187feec1d, regionState=CLOSED 2024-12-07T17:25:21,907 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(170): Closed 3fd7e64b6fc687219db67183a42fd845 2024-12-07T17:25:21,908 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=157 updating hbase:meta row=3fd7e64b6fc687219db67183a42fd845, regionState=CLOSED 2024-12-07T17:25:21,910 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=159, resume processing ppid=158 2024-12-07T17:25:21,910 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, ppid=158, state=SUCCESS; CloseRegionProcedure 0896c53167410c62154cbc0187feec1d, server=5c70517e8267,36163,1733592165482 in 159 msec 2024-12-07T17:25:21,910 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=157 2024-12-07T17:25:21,910 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=157, state=SUCCESS; CloseRegionProcedure 3fd7e64b6fc687219db67183a42fd845, server=5c70517e8267,41313,1733592165267 in 160 msec 2024-12-07T17:25:21,911 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=156, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=0896c53167410c62154cbc0187feec1d, UNASSIGN in 164 msec 2024-12-07T17:25:21,912 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=157, resume processing ppid=156 2024-12-07T17:25:21,912 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, ppid=156, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=3fd7e64b6fc687219db67183a42fd845, UNASSIGN in 164 msec 2024-12-07T17:25:21,913 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-12-07T17:25:21,913 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 167 msec 2024-12-07T17:25:21,914 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put 
{"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592321914"}]},"ts":"1733592321914"} 2024-12-07T17:25:21,915 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-12-07T17:25:21,917 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to state=DISABLED 2024-12-07T17:25:21,918 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, state=SUCCESS; DisableTableProcedure table=testtb-testExportExpiredSnapshot in 178 msec 2024-12-07T17:25:22,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-07T17:25:22,043 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot, procId: 155 completed 2024-12-07T17:25:22,044 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportExpiredSnapshot 2024-12-07T17:25:22,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-07T17:25:22,045 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-07T17:25:22,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-12-07T17:25:22,046 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=161, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-07T17:25:22,047 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36163 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportExpiredSnapshot 2024-12-07T17:25:22,049 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportExpiredSnapshot/3fd7e64b6fc687219db67183a42fd845 2024-12-07T17:25:22,049 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportExpiredSnapshot/0896c53167410c62154cbc0187feec1d 2024-12-07T17:25:22,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-07T17:25:22,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-07T17:25:22,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-07T17:25:22,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-07T17:25:22,051 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-07T17:25:22,051 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-07T17:25:22,052 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportExpiredSnapshot/0896c53167410c62154cbc0187feec1d/cf, FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportExpiredSnapshot/0896c53167410c62154cbc0187feec1d/recovered.edits] 2024-12-07T17:25:22,052 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportExpiredSnapshot/3fd7e64b6fc687219db67183a42fd845/cf, FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportExpiredSnapshot/3fd7e64b6fc687219db67183a42fd845/recovered.edits] 2024-12-07T17:25:22,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-07T17:25:22,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:25:22,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-07T17:25:22,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:25:22,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:25:22,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:25:22,052 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data null 2024-12-07T17:25:22,052 INFO [zk-permission-watcher-pool-0 {}] 
access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-07T17:25:22,053 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data null 2024-12-07T17:25:22,053 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-07T17:25:22,054 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:22,054 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:22,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-07T17:25:22,054 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:22,054 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:22,057 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportExpiredSnapshot/3fd7e64b6fc687219db67183a42fd845/cf/dccbcd6bc78e4b5a9abe6a9338e29e1f to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportExpiredSnapshot/3fd7e64b6fc687219db67183a42fd845/cf/dccbcd6bc78e4b5a9abe6a9338e29e1f 2024-12-07T17:25:22,057 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportExpiredSnapshot/0896c53167410c62154cbc0187feec1d/cf/e37a2b4d385d4db3a3bba45353db35d8 to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportExpiredSnapshot/0896c53167410c62154cbc0187feec1d/cf/e37a2b4d385d4db3a3bba45353db35d8 2024-12-07T17:25:22,060 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportExpiredSnapshot/0896c53167410c62154cbc0187feec1d/recovered.edits/9.seqid to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportExpiredSnapshot/0896c53167410c62154cbc0187feec1d/recovered.edits/9.seqid 2024-12-07T17:25:22,060 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportExpiredSnapshot/3fd7e64b6fc687219db67183a42fd845/recovered.edits/9.seqid to 
hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportExpiredSnapshot/3fd7e64b6fc687219db67183a42fd845/recovered.edits/9.seqid 2024-12-07T17:25:22,060 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportExpiredSnapshot/0896c53167410c62154cbc0187feec1d 2024-12-07T17:25:22,060 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportExpiredSnapshot/3fd7e64b6fc687219db67183a42fd845 2024-12-07T17:25:22,060 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-12-07T17:25:22,062 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=161, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-07T17:25:22,064 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-12-07T17:25:22,066 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportExpiredSnapshot' descriptor. 2024-12-07T17:25:22,067 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=161, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-07T17:25:22,067 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportExpiredSnapshot' from region states. 2024-12-07T17:25:22,067 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1733592309095.3fd7e64b6fc687219db67183a42fd845.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733592322067"}]},"ts":"9223372036854775807"} 2024-12-07T17:25:22,067 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1733592309095.0896c53167410c62154cbc0187feec1d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733592322067"}]},"ts":"9223372036854775807"} 2024-12-07T17:25:22,069 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-07T17:25:22,069 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 3fd7e64b6fc687219db67183a42fd845, NAME => 'testtb-testExportExpiredSnapshot,,1733592309095.3fd7e64b6fc687219db67183a42fd845.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 0896c53167410c62154cbc0187feec1d, NAME => 'testtb-testExportExpiredSnapshot,1,1733592309095.0896c53167410c62154cbc0187feec1d.', STARTKEY => '1', ENDKEY => ''}] 2024-12-07T17:25:22,069 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportExpiredSnapshot' as deleted. 
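The DisableTableProcedure, DeleteTableProcedure, and snapshot deletions in this stretch of the log are the test's teardown. From the client side they map to a few Admin calls; the sketch below assumes the standard HBase 2.x Admin API and reuses the table and snapshot names from the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotTestTeardown {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
      // Disable, then delete the table; region HFiles are moved to the archive
      // directory by HFileArchiver, as the DEBUG entries above record.
      admin.disableTable(table);
      admin.deleteTable(table);
      // Remove the snapshots created for this test case.
      admin.deleteSnapshot("emptySnaptb0-testExportExpiredSnapshot");
      admin.deleteSnapshot("snapshot-testExportExpiredSnapshot");
      admin.deleteSnapshot("snaptb0-testExportExpiredSnapshot");
    }
  }
}
```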
2024-12-07T17:25:22,069 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733592322069"}]},"ts":"9223372036854775807"} 2024-12-07T17:25:22,070 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportExpiredSnapshot state from META 2024-12-07T17:25:22,072 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=161, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-07T17:25:22,073 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 28 msec 2024-12-07T17:25:22,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-07T17:25:22,155 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot, procId: 161 completed 2024-12-07T17:25:22,164 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportExpiredSnapshot" 2024-12-07T17:25:22,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-12-07T17:25:22,166 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportExpiredSnapshot" 2024-12-07T17:25:22,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(380): Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-12-07T17:25:22,169 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportExpiredSnapshot" 2024-12-07T17:25:22,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportExpiredSnapshot 2024-12-07T17:25:22,189 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=789 (was 794), OpenFileDescriptor=781 (was 793), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=498 (was 565), ProcessCount=11 (was 20), AvailableMemoryMB=5178 (was 4229) - AvailableMemoryMB LEAK? 
- 2024-12-07T17:25:22,189 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=789 is superior to 500 2024-12-07T17:25:22,204 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=789, OpenFileDescriptor=781, MaxFileDescriptor=1048576, SystemLoadAverage=498, ProcessCount=11, AvailableMemoryMB=5177 2024-12-07T17:25:22,204 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=789 is superior to 500 2024-12-07T17:25:22,206 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T17:25:22,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=162, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-07T17:25:22,208 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T17:25:22,208 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:25:22,208 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 162 2024-12-07T17:25:22,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-07T17:25:22,208 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T17:25:22,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742186_1362 (size=412) 2024-12-07T17:25:22,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742186_1362 (size=412) 2024-12-07T17:25:22,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742186_1362 (size=412) 2024-12-07T17:25:22,217 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 3a2be2cac0a8dbe6615043afecd8922d, NAME => 'testtb-testEmptyExportFileSystemState,,1733592322206.3a2be2cac0a8dbe6615043afecd8922d.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:25:22,217 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => c2df44b3304dd5a62bf9c3602aa3a526, NAME => 'testtb-testEmptyExportFileSystemState,1,1733592322206.c2df44b3304dd5a62bf9c3602aa3a526.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:25:22,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742187_1363 (size=73) 2024-12-07T17:25:22,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742187_1363 (size=73) 2024-12-07T17:25:22,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742188_1364 (size=73) 2024-12-07T17:25:22,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742187_1363 (size=73) 2024-12-07T17:25:22,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742188_1364 (size=73) 2024-12-07T17:25:22,234 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,,1733592322206.3a2be2cac0a8dbe6615043afecd8922d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:25:22,235 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1681): Closing 3a2be2cac0a8dbe6615043afecd8922d, disabling compactions & flushes 2024-12-07T17:25:22,235 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,,1733592322206.3a2be2cac0a8dbe6615043afecd8922d. 2024-12-07T17:25:22,235 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,,1733592322206.3a2be2cac0a8dbe6615043afecd8922d. 2024-12-07T17:25:22,235 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,,1733592322206.3a2be2cac0a8dbe6615043afecd8922d. 
after waiting 0 ms 2024-12-07T17:25:22,235 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,,1733592322206.3a2be2cac0a8dbe6615043afecd8922d. 2024-12-07T17:25:22,235 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,,1733592322206.3a2be2cac0a8dbe6615043afecd8922d. 2024-12-07T17:25:22,235 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1635): Region close journal for 3a2be2cac0a8dbe6615043afecd8922d: 2024-12-07T17:25:22,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742188_1364 (size=73) 2024-12-07T17:25:22,235 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,1,1733592322206.c2df44b3304dd5a62bf9c3602aa3a526.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:25:22,236 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1681): Closing c2df44b3304dd5a62bf9c3602aa3a526, disabling compactions & flushes 2024-12-07T17:25:22,236 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,1,1733592322206.c2df44b3304dd5a62bf9c3602aa3a526. 2024-12-07T17:25:22,236 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,1,1733592322206.c2df44b3304dd5a62bf9c3602aa3a526. 2024-12-07T17:25:22,236 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1733592322206.c2df44b3304dd5a62bf9c3602aa3a526. after waiting 0 ms 2024-12-07T17:25:22,236 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1733592322206.c2df44b3304dd5a62bf9c3602aa3a526. 2024-12-07T17:25:22,236 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,1,1733592322206.c2df44b3304dd5a62bf9c3602aa3a526. 
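
The CreateTableProcedure and RegionOpenAndInit entries above all stem from a single client-side create-table request (the "Client=jenkins//172.17.0.2 create 'testtb-testEmptyExportFileSystemState'" entry). Below is a minimal sketch of that request, assuming the standard HBase 2.x Admin API; the connection setup is illustrative, but the table name, the single 'cf' family with VERSIONS => '1', and the split key '1' (which yields the two regions being initialised here) are taken from the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // picks up hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
      // One 'cf' family with max versions = 1; the remaining attributes shown in the
      // log (BLOOMFILTER, BLOCKSIZE, TTL, ...) are the defaults and are not set here.
      TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .build());
      // A single split key '1' produces the two regions seen above: ['', '1') and ['1', '').
      byte[][] splits = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(td.build(), splits);
    }
  }
}
```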
2024-12-07T17:25:22,236 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1635): Region close journal for c2df44b3304dd5a62bf9c3602aa3a526: 2024-12-07T17:25:22,237 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T17:25:22,237 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1733592322206.3a2be2cac0a8dbe6615043afecd8922d.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1733592322237"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733592322237"}]},"ts":"1733592322237"} 2024-12-07T17:25:22,237 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1733592322206.c2df44b3304dd5a62bf9c3602aa3a526.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1733592322237"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733592322237"}]},"ts":"1733592322237"} 2024-12-07T17:25:22,239 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-07T17:25:22,239 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T17:25:22,240 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592322239"}]},"ts":"1733592322239"} 2024-12-07T17:25:22,241 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-12-07T17:25:22,244 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(202): Hosts are {5c70517e8267=0} racks are {/default-rack=0} 2024-12-07T17:25:22,245 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-07T17:25:22,245 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-07T17:25:22,245 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-07T17:25:22,245 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-07T17:25:22,245 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-07T17:25:22,245 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-07T17:25:22,245 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T17:25:22,245 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3a2be2cac0a8dbe6615043afecd8922d, ASSIGN}, {pid=164, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=c2df44b3304dd5a62bf9c3602aa3a526, ASSIGN}] 2024-12-07T17:25:22,246 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=164, ppid=162, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=c2df44b3304dd5a62bf9c3602aa3a526, ASSIGN 2024-12-07T17:25:22,246 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=163, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3a2be2cac0a8dbe6615043afecd8922d, ASSIGN 2024-12-07T17:25:22,247 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=163, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3a2be2cac0a8dbe6615043afecd8922d, ASSIGN; state=OFFLINE, location=5c70517e8267,38423,1733592165361; forceNewPlan=false, retain=false 2024-12-07T17:25:22,247 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=164, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=c2df44b3304dd5a62bf9c3602aa3a526, ASSIGN; state=OFFLINE, location=5c70517e8267,41313,1733592165267; forceNewPlan=false, retain=false 2024-12-07T17:25:22,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-07T17:25:22,397 INFO [5c70517e8267:36895 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-07T17:25:22,397 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=163 updating hbase:meta row=3a2be2cac0a8dbe6615043afecd8922d, regionState=OPENING, regionLocation=5c70517e8267,38423,1733592165361 2024-12-07T17:25:22,397 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=c2df44b3304dd5a62bf9c3602aa3a526, regionState=OPENING, regionLocation=5c70517e8267,41313,1733592165267 2024-12-07T17:25:22,399 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=165, ppid=163, state=RUNNABLE; OpenRegionProcedure 3a2be2cac0a8dbe6615043afecd8922d, server=5c70517e8267,38423,1733592165361}] 2024-12-07T17:25:22,399 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=164, state=RUNNABLE; OpenRegionProcedure c2df44b3304dd5a62bf9c3602aa3a526, server=5c70517e8267,41313,1733592165267}] 2024-12-07T17:25:22,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-07T17:25:22,550 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5c70517e8267,38423,1733592165361 2024-12-07T17:25:22,551 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5c70517e8267,41313,1733592165267 2024-12-07T17:25:22,553 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] handler.AssignRegionHandler(135): Open testtb-testEmptyExportFileSystemState,,1733592322206.3a2be2cac0a8dbe6615043afecd8922d. 
2024-12-07T17:25:22,554 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7285): Opening region: {ENCODED => 3a2be2cac0a8dbe6615043afecd8922d, NAME => 'testtb-testEmptyExportFileSystemState,,1733592322206.3a2be2cac0a8dbe6615043afecd8922d.', STARTKEY => '', ENDKEY => '1'} 2024-12-07T17:25:22,554 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1733592322206.3a2be2cac0a8dbe6615043afecd8922d. service=AccessControlService 2024-12-07T17:25:22,554 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T17:25:22,554 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 3a2be2cac0a8dbe6615043afecd8922d 2024-12-07T17:25:22,554 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,,1733592322206.3a2be2cac0a8dbe6615043afecd8922d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:25:22,554 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] handler.AssignRegionHandler(135): Open testtb-testEmptyExportFileSystemState,1,1733592322206.c2df44b3304dd5a62bf9c3602aa3a526. 2024-12-07T17:25:22,554 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7327): checking encryption for 3a2be2cac0a8dbe6615043afecd8922d 2024-12-07T17:25:22,554 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7330): checking classloading for 3a2be2cac0a8dbe6615043afecd8922d 2024-12-07T17:25:22,554 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7285): Opening region: {ENCODED => c2df44b3304dd5a62bf9c3602aa3a526, NAME => 'testtb-testEmptyExportFileSystemState,1,1733592322206.c2df44b3304dd5a62bf9c3602aa3a526.', STARTKEY => '1', ENDKEY => ''} 2024-12-07T17:25:22,555 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1733592322206.c2df44b3304dd5a62bf9c3602aa3a526. service=AccessControlService 2024-12-07T17:25:22,555 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-07T17:25:22,555 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState c2df44b3304dd5a62bf9c3602aa3a526 2024-12-07T17:25:22,555 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,1,1733592322206.c2df44b3304dd5a62bf9c3602aa3a526.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:25:22,555 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7327): checking encryption for c2df44b3304dd5a62bf9c3602aa3a526 2024-12-07T17:25:22,555 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7330): checking classloading for c2df44b3304dd5a62bf9c3602aa3a526 2024-12-07T17:25:22,556 INFO [StoreOpener-3a2be2cac0a8dbe6615043afecd8922d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3a2be2cac0a8dbe6615043afecd8922d 2024-12-07T17:25:22,557 INFO [StoreOpener-c2df44b3304dd5a62bf9c3602aa3a526-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c2df44b3304dd5a62bf9c3602aa3a526 2024-12-07T17:25:22,557 INFO [StoreOpener-3a2be2cac0a8dbe6615043afecd8922d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3a2be2cac0a8dbe6615043afecd8922d columnFamilyName cf 2024-12-07T17:25:22,557 DEBUG [StoreOpener-3a2be2cac0a8dbe6615043afecd8922d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:25:22,558 INFO [StoreOpener-3a2be2cac0a8dbe6615043afecd8922d-1 {}] regionserver.HStore(327): Store=3a2be2cac0a8dbe6615043afecd8922d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:25:22,558 INFO [StoreOpener-c2df44b3304dd5a62bf9c3602aa3a526-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c2df44b3304dd5a62bf9c3602aa3a526 columnFamilyName cf 2024-12-07T17:25:22,558 DEBUG [StoreOpener-c2df44b3304dd5a62bf9c3602aa3a526-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:25:22,558 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testEmptyExportFileSystemState/3a2be2cac0a8dbe6615043afecd8922d 2024-12-07T17:25:22,559 INFO [StoreOpener-c2df44b3304dd5a62bf9c3602aa3a526-1 {}] regionserver.HStore(327): Store=c2df44b3304dd5a62bf9c3602aa3a526/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:25:22,559 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testEmptyExportFileSystemState/3a2be2cac0a8dbe6615043afecd8922d 2024-12-07T17:25:22,559 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testEmptyExportFileSystemState/c2df44b3304dd5a62bf9c3602aa3a526 2024-12-07T17:25:22,560 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testEmptyExportFileSystemState/c2df44b3304dd5a62bf9c3602aa3a526 2024-12-07T17:25:22,561 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1085): writing seq id for 3a2be2cac0a8dbe6615043afecd8922d 2024-12-07T17:25:22,561 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1085): writing seq id for c2df44b3304dd5a62bf9c3602aa3a526 2024-12-07T17:25:22,563 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testEmptyExportFileSystemState/3a2be2cac0a8dbe6615043afecd8922d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T17:25:22,563 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1102): Opened 3a2be2cac0a8dbe6615043afecd8922d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59526896, jitterRate=-0.11298012733459473}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T17:25:22,563 DEBUG 
[RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testEmptyExportFileSystemState/c2df44b3304dd5a62bf9c3602aa3a526/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T17:25:22,564 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1102): Opened c2df44b3304dd5a62bf9c3602aa3a526; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62321811, jitterRate=-0.071332648396492}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T17:25:22,564 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1001): Region open journal for c2df44b3304dd5a62bf9c3602aa3a526: 2024-12-07T17:25:22,564 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1001): Region open journal for 3a2be2cac0a8dbe6615043afecd8922d: 2024-12-07T17:25:22,565 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1733592322206.c2df44b3304dd5a62bf9c3602aa3a526., pid=166, masterSystemTime=1733592322551 2024-12-07T17:25:22,565 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1733592322206.3a2be2cac0a8dbe6615043afecd8922d., pid=165, masterSystemTime=1733592322550 2024-12-07T17:25:22,566 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1733592322206.3a2be2cac0a8dbe6615043afecd8922d. 2024-12-07T17:25:22,566 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] handler.AssignRegionHandler(164): Opened testtb-testEmptyExportFileSystemState,,1733592322206.3a2be2cac0a8dbe6615043afecd8922d. 2024-12-07T17:25:22,567 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=163 updating hbase:meta row=3a2be2cac0a8dbe6615043afecd8922d, regionState=OPEN, openSeqNum=2, regionLocation=5c70517e8267,38423,1733592165361 2024-12-07T17:25:22,567 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1733592322206.c2df44b3304dd5a62bf9c3602aa3a526. 2024-12-07T17:25:22,567 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] handler.AssignRegionHandler(164): Opened testtb-testEmptyExportFileSystemState,1,1733592322206.c2df44b3304dd5a62bf9c3602aa3a526. 
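
Once both OpenRegionProcedures report back, the test blocks until the assignment is visible (the "Waiting until all regions of table ... get assigned. Timeout = 60000ms" entries a few lines further down). A minimal way to perform the same wait from a plain client is sketched below, assuming the Admin handle from the previous sketch; the helper name, poll interval, and exception type are illustrative, while the 60000 ms timeout matches the log.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

/** Blocks until every region of the table is assigned and open, or the timeout elapses. */
static void waitUntilAssigned(Admin admin, TableName table, long timeoutMs) throws Exception {
  long deadline = System.currentTimeMillis() + timeoutMs;   // the test log uses Timeout = 60000ms
  while (!admin.isTableAvailable(table)) {                  // true once all regions are open
    if (System.currentTimeMillis() > deadline) {
      throw new IllegalStateException(table + " regions were not assigned within " + timeoutMs + " ms");
    }
    Thread.sleep(100);                                      // small back-off between checks
  }
}
```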
2024-12-07T17:25:22,567 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=c2df44b3304dd5a62bf9c3602aa3a526, regionState=OPEN, openSeqNum=2, regionLocation=5c70517e8267,41313,1733592165267 2024-12-07T17:25:22,569 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=165, resume processing ppid=163 2024-12-07T17:25:22,569 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, ppid=163, state=SUCCESS; OpenRegionProcedure 3a2be2cac0a8dbe6615043afecd8922d, server=5c70517e8267,38423,1733592165361 in 169 msec 2024-12-07T17:25:22,570 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=164 2024-12-07T17:25:22,570 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=164, state=SUCCESS; OpenRegionProcedure c2df44b3304dd5a62bf9c3602aa3a526, server=5c70517e8267,41313,1733592165267 in 170 msec 2024-12-07T17:25:22,570 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, ppid=162, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3a2be2cac0a8dbe6615043afecd8922d, ASSIGN in 324 msec 2024-12-07T17:25:22,571 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=162 2024-12-07T17:25:22,571 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=162, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=c2df44b3304dd5a62bf9c3602aa3a526, ASSIGN in 325 msec 2024-12-07T17:25:22,572 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T17:25:22,572 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592322572"}]},"ts":"1733592322572"} 2024-12-07T17:25:22,573 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-12-07T17:25:22,576 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T17:25:22,576 DEBUG [PEWorker-1 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-12-07T17:25:22,578 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36163 {}] access.PermissionStorage(611): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-07T17:25:22,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:25:22,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:25:22,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:25:22,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:25:22,586 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:22,586 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:22,586 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:22,586 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:22,586 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:22,586 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:22,586 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:22,586 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:22,587 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, state=SUCCESS; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 380 msec 2024-12-07T17:25:22,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-07T17:25:22,811 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 162 completed 2024-12-07T17:25:22,812 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testEmptyExportFileSystemState get assigned. 
Timeout = 60000ms 2024-12-07T17:25:22,812 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:25:22,815 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testEmptyExportFileSystemState assigned to meta. Checking AM states. 2024-12-07T17:25:22,815 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:25:22,815 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testEmptyExportFileSystemState assigned. 2024-12-07T17:25:22,817 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-07T17:25:22,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733592322818 (current time:1733592322818). 2024-12-07T17:25:22,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T17:25:22,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-07T17:25:22,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T17:25:22,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f0b7a2f to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@30d4e0f4 2024-12-07T17:25:22,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1fc52897, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:25:22,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:25:22,824 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59988, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:25:22,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f0b7a2f to 127.0.0.1:58133 2024-12-07T17:25:22,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:25:22,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x53c3fb27 to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@440762eb 2024-12-07T17:25:22,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72b07b6c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:25:22,829 DEBUG [hconnection-0xc1c0ef2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:25:22,830 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59996, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:25:22,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:25:22,832 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42360, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:25:22,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x53c3fb27 to 127.0.0.1:58133 2024-12-07T17:25:22,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:25:22,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] access.PermissionStorage(611): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-07T17:25:22,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-07T17:25:22,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-07T17:25:22,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 167 2024-12-07T17:25:22,835 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T17:25:22,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-07T17:25:22,836 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T17:25:22,838 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState 
type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T17:25:22,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742189_1365 (size=185) 2024-12-07T17:25:22,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742189_1365 (size=185) 2024-12-07T17:25:22,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742189_1365 (size=185) 2024-12-07T17:25:22,847 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T17:25:22,847 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 3a2be2cac0a8dbe6615043afecd8922d}, {pid=169, ppid=167, state=RUNNABLE; SnapshotRegionProcedure c2df44b3304dd5a62bf9c3602aa3a526}] 2024-12-07T17:25:22,848 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=168, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 3a2be2cac0a8dbe6615043afecd8922d 2024-12-07T17:25:22,848 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=169, ppid=167, state=RUNNABLE; SnapshotRegionProcedure c2df44b3304dd5a62bf9c3602aa3a526 2024-12-07T17:25:22,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-07T17:25:22,999 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5c70517e8267,41313,1733592165267 2024-12-07T17:25:22,999 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5c70517e8267,38423,1733592165361 2024-12-07T17:25:22,999 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38423 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=168 2024-12-07T17:25:22,999 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41313 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=169 2024-12-07T17:25:22,999 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1733592322206.3a2be2cac0a8dbe6615043afecd8922d. 2024-12-07T17:25:22,999 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1733592322206.c2df44b3304dd5a62bf9c3602aa3a526. 
2024-12-07T17:25:23,000 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for 3a2be2cac0a8dbe6615043afecd8922d: 2024-12-07T17:25:23,000 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1733592322206.3a2be2cac0a8dbe6615043afecd8922d. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-07T17:25:23,000 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.HRegion(2538): Flush status journal for c2df44b3304dd5a62bf9c3602aa3a526: 2024-12-07T17:25:23,000 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1733592322206.c2df44b3304dd5a62bf9c3602aa3a526. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-07T17:25:23,000 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1733592322206.3a2be2cac0a8dbe6615043afecd8922d.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-07T17:25:23,000 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:25:23,000 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T17:25:23,000 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1733592322206.c2df44b3304dd5a62bf9c3602aa3a526.' 
region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-07T17:25:23,000 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:25:23,000 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T17:25:23,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742190_1366 (size=76) 2024-12-07T17:25:23,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742191_1367 (size=76) 2024-12-07T17:25:23,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742191_1367 (size=76) 2024-12-07T17:25:23,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742191_1367 (size=76) 2024-12-07T17:25:23,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742190_1366 (size=76) 2024-12-07T17:25:23,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742190_1366 (size=76) 2024-12-07T17:25:23,011 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1733592322206.3a2be2cac0a8dbe6615043afecd8922d. 2024-12-07T17:25:23,011 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-12-07T17:25:23,011 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1733592322206.c2df44b3304dd5a62bf9c3602aa3a526. 
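
All of the SnapshotProcedure and SnapshotRegionCallable work in this phase is driven by one client call. A minimal sketch, assuming the same Admin handle as in the earlier sketches: for an enabled table the master runs this as the FLUSH-type snapshot shown in the log, and since no data has been written yet the per-region manifests above reference an empty list of hfiles.

```java
import org.apache.hadoop.hbase.TableName;

// Request the "empty" snapshot of the still-unpopulated table (names taken from the log).
TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
admin.snapshot("emptySnaptb0-testEmptyExportFileSystemState", table);
```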
2024-12-07T17:25:23,011 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=169 2024-12-07T17:25:23,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=169 2024-12-07T17:25:23,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-12-07T17:25:23,011 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region c2df44b3304dd5a62bf9c3602aa3a526 2024-12-07T17:25:23,011 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 3a2be2cac0a8dbe6615043afecd8922d 2024-12-07T17:25:23,011 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=168, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 3a2be2cac0a8dbe6615043afecd8922d 2024-12-07T17:25:23,012 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=169, ppid=167, state=RUNNABLE; SnapshotRegionProcedure c2df44b3304dd5a62bf9c3602aa3a526 2024-12-07T17:25:23,013 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, ppid=167, state=SUCCESS; SnapshotRegionProcedure c2df44b3304dd5a62bf9c3602aa3a526 in 165 msec 2024-12-07T17:25:23,014 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-12-07T17:25:23,014 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T17:25:23,014 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; SnapshotRegionProcedure 3a2be2cac0a8dbe6615043afecd8922d in 165 msec 2024-12-07T17:25:23,014 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T17:25:23,015 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T17:25:23,015 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState 2024-12-07T17:25:23,016 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-07T17:25:23,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:33165 is added to blk_1073742192_1368 (size=567) 2024-12-07T17:25:23,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742192_1368 (size=567) 2024-12-07T17:25:23,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742192_1368 (size=567) 2024-12-07T17:25:23,025 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T17:25:23,029 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T17:25:23,029 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-07T17:25:23,030 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T17:25:23,031 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 167 2024-12-07T17:25:23,032 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 197 msec 2024-12-07T17:25:23,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-07T17:25:23,138 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 167 completed 2024-12-07T17:25:23,143 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=38423 {}] regionserver.HRegion(8254): writing data to region testtb-testEmptyExportFileSystemState,,1733592322206.3a2be2cac0a8dbe6615043afecd8922d. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T17:25:23,144 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41313 {}] regionserver.HRegion(8254): writing data to region testtb-testEmptyExportFileSystemState,1,1733592322206.c2df44b3304dd5a62bf9c3602aa3a526. with WAL disabled. Data may be lost in the event of a crash. 
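
The "writing data to region ... with WAL disabled" warnings just above are produced when puts arrive with write-ahead logging turned off. One way to produce such a write from a client is sketched here, assuming the Connection from the first sketch; the row key is one that later appears in the flush output for region 3a2be2cac0a8dbe6615043afecd8922d, while the cell value is purely illustrative.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

try (Table table = conn.getTable(TableName.valueOf("testtb-testEmptyExportFileSystemState"))) {
  Put put = new Put(Bytes.toBytes("009618892726d4f35cc633fe1142fe8d"));   // row key seen in the flush log
  put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));  // value is illustrative
  put.setDurability(Durability.SKIP_WAL);  // triggers the "Data may be lost in the event of a crash" warning
  table.put(put);
}
```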
2024-12-07T17:25:23,147 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-12-07T17:25:23,147 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testEmptyExportFileSystemState,,1733592322206.3a2be2cac0a8dbe6615043afecd8922d. 2024-12-07T17:25:23,147 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:25:23,156 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-07T17:25:23,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733592323156 (current time:1733592323156). 2024-12-07T17:25:23,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T17:25:23,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-07T17:25:23,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T17:25:23,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x792ee966 to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7d11629b 2024-12-07T17:25:23,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3813e35d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:25:23,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:25:23,162 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60010, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:25:23,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x792ee966 to 127.0.0.1:58133 2024-12-07T17:25:23,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:25:23,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0961bfbb to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7f9211b6 2024-12-07T17:25:23,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@583c1c12, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, 
maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:25:23,167 DEBUG [hconnection-0xf7ea9cb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:25:23,168 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60020, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:25:23,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:25:23,170 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42372, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:25:23,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0961bfbb to 127.0.0.1:58133 2024-12-07T17:25:23,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:25:23,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] access.PermissionStorage(611): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-07T17:25:23,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-07T17:25:23,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=170, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-07T17:25:23,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 170 2024-12-07T17:25:23,173 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T17:25:23,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=170 2024-12-07T17:25:23,174 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T17:25:23,176 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T17:25:23,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to 
blk_1073742193_1369 (size=180) 2024-12-07T17:25:23,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742193_1369 (size=180) 2024-12-07T17:25:23,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742193_1369 (size=180) 2024-12-07T17:25:23,184 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T17:25:23,185 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=171, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 3a2be2cac0a8dbe6615043afecd8922d}, {pid=172, ppid=170, state=RUNNABLE; SnapshotRegionProcedure c2df44b3304dd5a62bf9c3602aa3a526}] 2024-12-07T17:25:23,185 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=171, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 3a2be2cac0a8dbe6615043afecd8922d 2024-12-07T17:25:23,185 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=172, ppid=170, state=RUNNABLE; SnapshotRegionProcedure c2df44b3304dd5a62bf9c3602aa3a526 2024-12-07T17:25:23,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=170 2024-12-07T17:25:23,336 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5c70517e8267,38423,1733592165361 2024-12-07T17:25:23,336 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5c70517e8267,41313,1733592165267 2024-12-07T17:25:23,337 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38423 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=171 2024-12-07T17:25:23,337 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41313 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=172 2024-12-07T17:25:23,337 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1733592322206.c2df44b3304dd5a62bf9c3602aa3a526. 2024-12-07T17:25:23,337 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1733592322206.3a2be2cac0a8dbe6615043afecd8922d. 
2024-12-07T17:25:23,337 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegion(2837): Flushing 3a2be2cac0a8dbe6615043afecd8922d 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-07T17:25:23,337 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing c2df44b3304dd5a62bf9c3602aa3a526 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-07T17:25:23,355 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testEmptyExportFileSystemState/c2df44b3304dd5a62bf9c3602aa3a526/.tmp/cf/cf240851570e4854be9d02c255809b74 is 71, key is 118cc6a300bcdaeedbdb1db8eda823c4/cf:q/1733592323144/Put/seqid=0 2024-12-07T17:25:23,361 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testEmptyExportFileSystemState/3a2be2cac0a8dbe6615043afecd8922d/.tmp/cf/85dd509ca2e94178affdbe62099cb1fc is 71, key is 009618892726d4f35cc633fe1142fe8d/cf:q/1733592323143/Put/seqid=0 2024-12-07T17:25:23,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742194_1370 (size=8256) 2024-12-07T17:25:23,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742194_1370 (size=8256) 2024-12-07T17:25:23,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742194_1370 (size=8256) 2024-12-07T17:25:23,362 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testEmptyExportFileSystemState/c2df44b3304dd5a62bf9c3602aa3a526/.tmp/cf/cf240851570e4854be9d02c255809b74 2024-12-07T17:25:23,367 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testEmptyExportFileSystemState/c2df44b3304dd5a62bf9c3602aa3a526/.tmp/cf/cf240851570e4854be9d02c255809b74 as hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testEmptyExportFileSystemState/c2df44b3304dd5a62bf9c3602aa3a526/cf/cf240851570e4854be9d02c255809b74 2024-12-07T17:25:23,371 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testEmptyExportFileSystemState/c2df44b3304dd5a62bf9c3602aa3a526/cf/cf240851570e4854be9d02c255809b74, entries=46, sequenceid=6, filesize=8.1 K 2024-12-07T17:25:23,372 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] 
regionserver.HRegion(3040): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for c2df44b3304dd5a62bf9c3602aa3a526 in 35ms, sequenceid=6, compaction requested=false 2024-12-07T17:25:23,372 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-12-07T17:25:23,372 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for c2df44b3304dd5a62bf9c3602aa3a526: 2024-12-07T17:25:23,372 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1733592322206.c2df44b3304dd5a62bf9c3602aa3a526. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-07T17:25:23,373 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1733592322206.c2df44b3304dd5a62bf9c3602aa3a526.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-07T17:25:23,373 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:25:23,373 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testEmptyExportFileSystemState/c2df44b3304dd5a62bf9c3602aa3a526/cf/cf240851570e4854be9d02c255809b74] hfiles 2024-12-07T17:25:23,373 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testEmptyExportFileSystemState/c2df44b3304dd5a62bf9c3602aa3a526/cf/cf240851570e4854be9d02c255809b74 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-07T17:25:23,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742195_1371 (size=5356) 2024-12-07T17:25:23,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742195_1371 (size=5356) 2024-12-07T17:25:23,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742195_1371 (size=5356) 2024-12-07T17:25:23,376 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testEmptyExportFileSystemState/3a2be2cac0a8dbe6615043afecd8922d/.tmp/cf/85dd509ca2e94178affdbe62099cb1fc 2024-12-07T17:25:23,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742196_1372 (size=115) 2024-12-07T17:25:23,379 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742196_1372 (size=115) 2024-12-07T17:25:23,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742196_1372 (size=115) 2024-12-07T17:25:23,381 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testEmptyExportFileSystemState/3a2be2cac0a8dbe6615043afecd8922d/.tmp/cf/85dd509ca2e94178affdbe62099cb1fc as hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testEmptyExportFileSystemState/3a2be2cac0a8dbe6615043afecd8922d/cf/85dd509ca2e94178affdbe62099cb1fc 2024-12-07T17:25:23,381 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1733592322206.c2df44b3304dd5a62bf9c3602aa3a526. 2024-12-07T17:25:23,381 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172 2024-12-07T17:25:23,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=172 2024-12-07T17:25:23,381 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region c2df44b3304dd5a62bf9c3602aa3a526 2024-12-07T17:25:23,382 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=172, ppid=170, state=RUNNABLE; SnapshotRegionProcedure c2df44b3304dd5a62bf9c3602aa3a526 2024-12-07T17:25:23,384 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=170, state=SUCCESS; SnapshotRegionProcedure c2df44b3304dd5a62bf9c3602aa3a526 in 197 msec 2024-12-07T17:25:23,386 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testEmptyExportFileSystemState/3a2be2cac0a8dbe6615043afecd8922d/cf/85dd509ca2e94178affdbe62099cb1fc, entries=4, sequenceid=6, filesize=5.2 K 2024-12-07T17:25:23,386 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegion(3040): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 3a2be2cac0a8dbe6615043afecd8922d in 49ms, sequenceid=6, compaction requested=false 2024-12-07T17:25:23,386 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegion(2538): Flush status journal for 3a2be2cac0a8dbe6615043afecd8922d: 2024-12-07T17:25:23,386 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1733592322206.3a2be2cac0a8dbe6615043afecd8922d. for snaptb0-testEmptyExportFileSystemState completed. 
2024-12-07T17:25:23,386 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1733592322206.3a2be2cac0a8dbe6615043afecd8922d.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-07T17:25:23,386 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:25:23,387 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testEmptyExportFileSystemState/3a2be2cac0a8dbe6615043afecd8922d/cf/85dd509ca2e94178affdbe62099cb1fc] hfiles 2024-12-07T17:25:23,387 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testEmptyExportFileSystemState/3a2be2cac0a8dbe6615043afecd8922d/cf/85dd509ca2e94178affdbe62099cb1fc for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-07T17:25:23,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742197_1373 (size=115) 2024-12-07T17:25:23,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742197_1373 (size=115) 2024-12-07T17:25:23,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742197_1373 (size=115) 2024-12-07T17:25:23,393 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1733592322206.3a2be2cac0a8dbe6615043afecd8922d. 
2024-12-07T17:25:23,393 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=171 2024-12-07T17:25:23,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=171 2024-12-07T17:25:23,393 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 3a2be2cac0a8dbe6615043afecd8922d 2024-12-07T17:25:23,393 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=171, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 3a2be2cac0a8dbe6615043afecd8922d 2024-12-07T17:25:23,395 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=171, resume processing ppid=170 2024-12-07T17:25:23,395 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, ppid=170, state=SUCCESS; SnapshotRegionProcedure 3a2be2cac0a8dbe6615043afecd8922d in 209 msec 2024-12-07T17:25:23,395 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T17:25:23,396 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T17:25:23,396 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T17:25:23,396 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState 2024-12-07T17:25:23,397 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState 2024-12-07T17:25:23,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742198_1374 (size=645) 2024-12-07T17:25:23,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742198_1374 (size=645) 2024-12-07T17:25:23,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742198_1374 (size=645) 2024-12-07T17:25:23,410 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 
2024-12-07T17:25:23,414 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T17:25:23,414 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState 2024-12-07T17:25:23,415 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T17:25:23,415 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 170 2024-12-07T17:25:23,416 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 244 msec 2024-12-07T17:25:23,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=170 2024-12-07T17:25:23,475 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 170 completed 2024-12-07T17:25:23,476 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592323475 2024-12-07T17:25:23,476 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:33765, tgtDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592323475, rawTgtDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592323475, srcFsUri=hdfs://localhost:33765, srcDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:25:23,505 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:33765, inputRoot=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:25:23,505 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1614647307_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592323475, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592323475/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-07T17:25:23,507 INFO 
[Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-07T17:25:23,510 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592323475/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-07T17:25:23,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742200_1376 (size=567) 2024-12-07T17:25:23,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742200_1376 (size=567) 2024-12-07T17:25:23,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742200_1376 (size=567) 2024-12-07T17:25:23,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742199_1375 (size=185) 2024-12-07T17:25:23,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742199_1375 (size=185) 2024-12-07T17:25:23,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742199_1375 (size=185) 2024-12-07T17:25:23,527 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-07T17:25:23,527 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-07T17:25:23,527 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-07T17:25:23,528 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-07T17:25:24,528 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop-17513608595262097809.jar 2024-12-07T17:25:24,529 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-07T17:25:24,529 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-07T17:25:24,599 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop-587194814801438129.jar 2024-12-07T17:25:24,599 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-07T17:25:24,599 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-07T17:25:24,600 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-07T17:25:24,600 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-07T17:25:24,600 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-07T17:25:24,600 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-07T17:25:24,601 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-07T17:25:24,601 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-07T17:25:24,601 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-07T17:25:24,601 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-07T17:25:24,601 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-07T17:25:24,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-07T17:25:24,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-07T17:25:24,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-07T17:25:24,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-07T17:25:24,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-07T17:25:24,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-07T17:25:24,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-07T17:25:24,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:25:24,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:25:24,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T17:25:24,604 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:25:24,604 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:25:24,604 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T17:25:24,604 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T17:25:24,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742201_1377 (size=127628) 2024-12-07T17:25:24,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742201_1377 (size=127628) 2024-12-07T17:25:24,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742201_1377 (size=127628) 2024-12-07T17:25:24,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742202_1378 (size=2172101) 2024-12-07T17:25:24,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742202_1378 (size=2172101) 2024-12-07T17:25:24,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742202_1378 (size=2172101) 2024-12-07T17:25:24,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742203_1379 (size=213228) 2024-12-07T17:25:24,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742203_1379 (size=213228) 2024-12-07T17:25:24,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742203_1379 (size=213228) 2024-12-07T17:25:24,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742204_1380 (size=1877034) 2024-12-07T17:25:24,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742204_1380 (size=1877034) 2024-12-07T17:25:24,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742204_1380 (size=1877034) 2024-12-07T17:25:24,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742205_1381 (size=533455) 2024-12-07T17:25:24,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742205_1381 (size=533455) 2024-12-07T17:25:24,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to 
blk_1073742205_1381 (size=533455) 2024-12-07T17:25:24,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742206_1382 (size=7280644) 2024-12-07T17:25:24,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742206_1382 (size=7280644) 2024-12-07T17:25:24,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742206_1382 (size=7280644) 2024-12-07T17:25:24,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742207_1383 (size=4188619) 2024-12-07T17:25:24,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742207_1383 (size=4188619) 2024-12-07T17:25:24,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742207_1383 (size=4188619) 2024-12-07T17:25:24,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742208_1384 (size=20406) 2024-12-07T17:25:24,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742208_1384 (size=20406) 2024-12-07T17:25:24,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742208_1384 (size=20406) 2024-12-07T17:25:24,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742209_1385 (size=75495) 2024-12-07T17:25:24,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742209_1385 (size=75495) 2024-12-07T17:25:24,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742209_1385 (size=75495) 2024-12-07T17:25:24,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742210_1386 (size=45609) 2024-12-07T17:25:24,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742210_1386 (size=45609) 2024-12-07T17:25:24,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742210_1386 (size=45609) 2024-12-07T17:25:24,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742211_1387 (size=110084) 2024-12-07T17:25:24,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742211_1387 (size=110084) 2024-12-07T17:25:24,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742211_1387 (size=110084) 2024-12-07T17:25:24,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742212_1388 (size=1323991) 2024-12-07T17:25:24,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added 
to blk_1073742212_1388 (size=1323991) 2024-12-07T17:25:24,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742212_1388 (size=1323991) 2024-12-07T17:25:24,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742213_1389 (size=23076) 2024-12-07T17:25:24,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742213_1389 (size=23076) 2024-12-07T17:25:24,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742213_1389 (size=23076) 2024-12-07T17:25:24,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742214_1390 (size=6350154) 2024-12-07T17:25:24,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742214_1390 (size=6350154) 2024-12-07T17:25:24,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742214_1390 (size=6350154) 2024-12-07T17:25:24,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742215_1391 (size=126803) 2024-12-07T17:25:24,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742215_1391 (size=126803) 2024-12-07T17:25:24,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742215_1391 (size=126803) 2024-12-07T17:25:24,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742216_1392 (size=322274) 2024-12-07T17:25:24,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742216_1392 (size=322274) 2024-12-07T17:25:24,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742216_1392 (size=322274) 2024-12-07T17:25:24,860 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-07T17:25:24,860 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-07T17:25:24,861 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-07T17:25:24,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742217_1393 (size=1832290) 2024-12-07T17:25:24,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742217_1393 (size=1832290) 2024-12-07T17:25:24,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added 
to blk_1073742217_1393 (size=1832290) 2024-12-07T17:25:24,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742218_1394 (size=30081) 2024-12-07T17:25:24,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742218_1394 (size=30081) 2024-12-07T17:25:24,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742218_1394 (size=30081) 2024-12-07T17:25:24,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742219_1395 (size=53616) 2024-12-07T17:25:24,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742219_1395 (size=53616) 2024-12-07T17:25:24,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742219_1395 (size=53616) 2024-12-07T17:25:24,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742220_1396 (size=29229) 2024-12-07T17:25:24,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742220_1396 (size=29229) 2024-12-07T17:25:24,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742220_1396 (size=29229) 2024-12-07T17:25:24,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742221_1397 (size=169089) 2024-12-07T17:25:24,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742221_1397 (size=169089) 2024-12-07T17:25:24,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742221_1397 (size=169089) 2024-12-07T17:25:24,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742222_1398 (size=451756) 2024-12-07T17:25:24,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742222_1398 (size=451756) 2024-12-07T17:25:24,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742222_1398 (size=451756) 2024-12-07T17:25:24,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742223_1399 (size=5175431) 2024-12-07T17:25:24,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742223_1399 (size=5175431) 2024-12-07T17:25:24,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742223_1399 (size=5175431) 2024-12-07T17:25:24,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742224_1400 (size=136454) 2024-12-07T17:25:24,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added 
to blk_1073742224_1400 (size=136454) 2024-12-07T17:25:24,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742224_1400 (size=136454) 2024-12-07T17:25:24,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742225_1401 (size=907853) 2024-12-07T17:25:24,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742225_1401 (size=907853) 2024-12-07T17:25:24,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742225_1401 (size=907853) 2024-12-07T17:25:24,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742226_1402 (size=3317408) 2024-12-07T17:25:24,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742226_1402 (size=3317408) 2024-12-07T17:25:24,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742226_1402 (size=3317408) 2024-12-07T17:25:24,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742227_1403 (size=503880) 2024-12-07T17:25:24,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742227_1403 (size=503880) 2024-12-07T17:25:24,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742227_1403 (size=503880) 2024-12-07T17:25:25,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742228_1404 (size=4695811) 2024-12-07T17:25:25,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742228_1404 (size=4695811) 2024-12-07T17:25:25,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742228_1404 (size=4695811) 2024-12-07T17:25:25,007 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-07T17:25:25,010 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list 2024-12-07T17:25:25,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742229_1405 (size=7) 2024-12-07T17:25:25,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742229_1405 (size=7) 2024-12-07T17:25:25,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742229_1405 (size=7) 2024-12-07T17:25:25,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742230_1406 (size=10) 2024-12-07T17:25:25,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742230_1406 (size=10) 2024-12-07T17:25:25,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742230_1406 (size=10) 2024-12-07T17:25:25,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742231_1407 (size=304786) 2024-12-07T17:25:25,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742231_1407 (size=304786) 2024-12-07T17:25:25,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742231_1407 (size=304786) 2024-12-07T17:25:25,076 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-07T17:25:25,076 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-07T17:25:25,275 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0007_000001 (auth:SIMPLE) from 127.0.0.1:38206 2024-12-07T17:25:27,407 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T17:25:30,621 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0007_000001 (auth:SIMPLE) from 127.0.0.1:42186 2024-12-07T17:25:30,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742232_1408 (size=350436) 2024-12-07T17:25:30,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742232_1408 (size=350436) 2024-12-07T17:25:30,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742232_1408 (size=350436) 2024-12-07T17:25:31,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742233_1409 (size=8568) 2024-12-07T17:25:31,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742233_1409 (size=8568) 2024-12-07T17:25:31,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742233_1409 (size=8568) 2024-12-07T17:25:31,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742234_1410 (size=460) 2024-12-07T17:25:31,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742234_1410 (size=460) 2024-12-07T17:25:31,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742234_1410 (size=460) 2024-12-07T17:25:31,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742235_1411 (size=8568) 2024-12-07T17:25:31,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742235_1411 (size=8568) 2024-12-07T17:25:31,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742235_1411 (size=8568) 2024-12-07T17:25:31,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742236_1412 (size=350436) 2024-12-07T17:25:31,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742236_1412 (size=350436) 2024-12-07T17:25:31,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742236_1412 (size=350436) 2024-12-07T17:25:33,188 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-07T17:25:33,189 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
2024-12-07T17:25:33,193 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-12-07T17:25:33,193 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-07T17:25:33,194 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-07T17:25:33,194 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1614647307_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-07T17:25:33,195 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-07T17:25:33,195 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-07T17:25:33,195 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1614647307_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592323475/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592323475/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-07T17:25:33,195 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592323475/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-07T17:25:33,195 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592323475/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-07T17:25:33,200 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testEmptyExportFileSystemState 2024-12-07T17:25:33,200 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testEmptyExportFileSystemState 2024-12-07T17:25:33,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-07T17:25:33,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-07T17:25:33,203 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592333203"}]},"ts":"1733592333203"} 2024-12-07T17:25:33,205 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated 
tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-12-07T17:25:33,207 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-12-07T17:25:33,208 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-12-07T17:25:33,209 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=175, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3a2be2cac0a8dbe6615043afecd8922d, UNASSIGN}, {pid=176, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=c2df44b3304dd5a62bf9c3602aa3a526, UNASSIGN}] 2024-12-07T17:25:33,211 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=176, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=c2df44b3304dd5a62bf9c3602aa3a526, UNASSIGN 2024-12-07T17:25:33,211 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=175, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3a2be2cac0a8dbe6615043afecd8922d, UNASSIGN 2024-12-07T17:25:33,216 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=176 updating hbase:meta row=c2df44b3304dd5a62bf9c3602aa3a526, regionState=CLOSING, regionLocation=5c70517e8267,41313,1733592165267 2024-12-07T17:25:33,216 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=175 updating hbase:meta row=3a2be2cac0a8dbe6615043afecd8922d, regionState=CLOSING, regionLocation=5c70517e8267,38423,1733592165361 2024-12-07T17:25:33,217 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T17:25:33,218 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=177, ppid=176, state=RUNNABLE; CloseRegionProcedure c2df44b3304dd5a62bf9c3602aa3a526, server=5c70517e8267,41313,1733592165267}] 2024-12-07T17:25:33,218 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T17:25:33,218 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=175, state=RUNNABLE; CloseRegionProcedure 3a2be2cac0a8dbe6615043afecd8922d, server=5c70517e8267,38423,1733592165361}] 2024-12-07T17:25:33,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-07T17:25:33,370 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5c70517e8267,41313,1733592165267 2024-12-07T17:25:33,370 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] handler.UnassignRegionHandler(124): Close c2df44b3304dd5a62bf9c3602aa3a526 2024-12-07T17:25:33,370 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5c70517e8267,38423,1733592165361 2024-12-07T17:25:33,371 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] 
handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T17:25:33,371 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1681): Closing c2df44b3304dd5a62bf9c3602aa3a526, disabling compactions & flushes 2024-12-07T17:25:33,371 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,1,1733592322206.c2df44b3304dd5a62bf9c3602aa3a526. 2024-12-07T17:25:33,371 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(124): Close 3a2be2cac0a8dbe6615043afecd8922d 2024-12-07T17:25:33,371 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,1,1733592322206.c2df44b3304dd5a62bf9c3602aa3a526. 2024-12-07T17:25:33,371 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1733592322206.c2df44b3304dd5a62bf9c3602aa3a526. after waiting 0 ms 2024-12-07T17:25:33,371 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1733592322206.c2df44b3304dd5a62bf9c3602aa3a526. 2024-12-07T17:25:33,371 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T17:25:33,371 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1681): Closing 3a2be2cac0a8dbe6615043afecd8922d, disabling compactions & flushes 2024-12-07T17:25:33,371 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,,1733592322206.3a2be2cac0a8dbe6615043afecd8922d. 2024-12-07T17:25:33,371 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,,1733592322206.3a2be2cac0a8dbe6615043afecd8922d. 2024-12-07T17:25:33,371 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,,1733592322206.3a2be2cac0a8dbe6615043afecd8922d. after waiting 0 ms 2024-12-07T17:25:33,371 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,,1733592322206.3a2be2cac0a8dbe6615043afecd8922d. 
2024-12-07T17:25:33,375 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testEmptyExportFileSystemState/c2df44b3304dd5a62bf9c3602aa3a526/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T17:25:33,375 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testEmptyExportFileSystemState/3a2be2cac0a8dbe6615043afecd8922d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T17:25:33,376 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:25:33,376 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:25:33,376 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,1,1733592322206.c2df44b3304dd5a62bf9c3602aa3a526. 2024-12-07T17:25:33,376 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,,1733592322206.3a2be2cac0a8dbe6615043afecd8922d. 2024-12-07T17:25:33,376 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1635): Region close journal for c2df44b3304dd5a62bf9c3602aa3a526: 2024-12-07T17:25:33,376 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1635): Region close journal for 3a2be2cac0a8dbe6615043afecd8922d: 2024-12-07T17:25:33,377 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(170): Closed 3a2be2cac0a8dbe6615043afecd8922d 2024-12-07T17:25:33,378 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=175 updating hbase:meta row=3a2be2cac0a8dbe6615043afecd8922d, regionState=CLOSED 2024-12-07T17:25:33,378 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] handler.UnassignRegionHandler(170): Closed c2df44b3304dd5a62bf9c3602aa3a526 2024-12-07T17:25:33,379 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=176 updating hbase:meta row=c2df44b3304dd5a62bf9c3602aa3a526, regionState=CLOSED 2024-12-07T17:25:33,382 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=175 2024-12-07T17:25:33,382 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=175, state=SUCCESS; CloseRegionProcedure 3a2be2cac0a8dbe6615043afecd8922d, server=5c70517e8267,38423,1733592165361 in 162 msec 2024-12-07T17:25:33,382 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=177, resume processing ppid=176 2024-12-07T17:25:33,383 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, ppid=176, state=SUCCESS; CloseRegionProcedure c2df44b3304dd5a62bf9c3602aa3a526, server=5c70517e8267,41313,1733592165267 in 164 msec 
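The DisableTableProcedure above (pid=173) fans out into a CloseTableRegionsProcedure, per-region unassign transitions, and CloseRegionProcedures on each regionserver before the table is marked DISABLED. From the client side the whole chain is triggered by a single Admin call; the snippet below is a generic illustration using only the public HBase client API, not the test's own code, with the table name reused from the log purely as an example.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableExample {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // disableTable blocks until the master's DisableTableProcedure completes, which is
      // why the log shows the repeated "Checking to see if procedure is done pid=173" polls.
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);
      }
      System.out.println("disabled: " + admin.isTableDisabled(table));
    }
  }
}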
2024-12-07T17:25:33,383 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, ppid=174, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3a2be2cac0a8dbe6615043afecd8922d, UNASSIGN in 173 msec 2024-12-07T17:25:33,384 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=174 2024-12-07T17:25:33,384 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=174, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=c2df44b3304dd5a62bf9c3602aa3a526, UNASSIGN in 173 msec 2024-12-07T17:25:33,385 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-12-07T17:25:33,385 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 176 msec 2024-12-07T17:25:33,386 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592333386"}]},"ts":"1733592333386"} 2024-12-07T17:25:33,387 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-12-07T17:25:33,389 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testEmptyExportFileSystemState to state=DISABLED 2024-12-07T17:25:33,390 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 189 msec 2024-12-07T17:25:33,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-07T17:25:33,505 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 173 completed 2024-12-07T17:25:33,505 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testEmptyExportFileSystemState 2024-12-07T17:25:33,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-07T17:25:33,507 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=179, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-07T17:25:33,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testEmptyExportFileSystemState 2024-12-07T17:25:33,508 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=179, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-07T17:25:33,509 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36163 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-12-07T17:25:33,510 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testEmptyExportFileSystemState/c2df44b3304dd5a62bf9c3602aa3a526 2024-12-07T17:25:33,510 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testEmptyExportFileSystemState/3a2be2cac0a8dbe6615043afecd8922d 2024-12-07T17:25:33,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-07T17:25:33,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-07T17:25:33,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-07T17:25:33,513 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-07T17:25:33,513 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testEmptyExportFileSystemState/c2df44b3304dd5a62bf9c3602aa3a526/cf, FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testEmptyExportFileSystemState/c2df44b3304dd5a62bf9c3602aa3a526/recovered.edits] 2024-12-07T17:25:33,513 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testEmptyExportFileSystemState/3a2be2cac0a8dbe6615043afecd8922d/cf, FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testEmptyExportFileSystemState/3a2be2cac0a8dbe6615043afecd8922d/recovered.edits] 2024-12-07T17:25:33,513 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-07T17:25:33,513 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-07T17:25:33,514 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-07T17:25:33,514 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-07T17:25:33,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 
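Alongside archiving the region directories, the delete also removes the table's entry under /hbase/acl, which is why every ZKPermissionWatcher above refreshes its permission cache. Those ACLs are the ones the tests grant through the AccessController; a hedged, generic example of granting such a table-level permission with the public client API (not the test's helper code) might look like the sketch below, where the user and table names are placeholders echoing the log.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTablePermissionExample {
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // Grant full table-level rights to a user; the AccessController persists this in
      // hbase:acl and mirrors it under the /hbase/acl znode watched in the entries above.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testEmptyExportFileSystemState"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}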
2024-12-07T17:25:33,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-07T17:25:33,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:25:33,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:25:33,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-07T17:25:33,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:25:33,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-07T17:25:33,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:25:33,517 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:33,517 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:33,517 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:33,517 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:33,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-07T17:25:33,518 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testEmptyExportFileSystemState/3a2be2cac0a8dbe6615043afecd8922d/cf/85dd509ca2e94178affdbe62099cb1fc to 
hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testEmptyExportFileSystemState/3a2be2cac0a8dbe6615043afecd8922d/cf/85dd509ca2e94178affdbe62099cb1fc 2024-12-07T17:25:33,519 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testEmptyExportFileSystemState/c2df44b3304dd5a62bf9c3602aa3a526/cf/cf240851570e4854be9d02c255809b74 to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testEmptyExportFileSystemState/c2df44b3304dd5a62bf9c3602aa3a526/cf/cf240851570e4854be9d02c255809b74 2024-12-07T17:25:33,521 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testEmptyExportFileSystemState/3a2be2cac0a8dbe6615043afecd8922d/recovered.edits/9.seqid to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testEmptyExportFileSystemState/3a2be2cac0a8dbe6615043afecd8922d/recovered.edits/9.seqid 2024-12-07T17:25:33,521 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testEmptyExportFileSystemState/c2df44b3304dd5a62bf9c3602aa3a526/recovered.edits/9.seqid to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testEmptyExportFileSystemState/c2df44b3304dd5a62bf9c3602aa3a526/recovered.edits/9.seqid 2024-12-07T17:25:33,522 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testEmptyExportFileSystemState/3a2be2cac0a8dbe6615043afecd8922d 2024-12-07T17:25:33,522 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testEmptyExportFileSystemState/c2df44b3304dd5a62bf9c3602aa3a526 2024-12-07T17:25:33,522 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-12-07T17:25:33,524 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=179, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-07T17:25:33,526 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testEmptyExportFileSystemState from hbase:meta 2024-12-07T17:25:33,528 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 2024-12-07T17:25:33,529 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=179, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-07T17:25:33,529 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testEmptyExportFileSystemState' from region states. 
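The DeleteTableProcedure above archives each region directory via HFileArchiver, deletes the region rows from hbase:meta, and drops the table descriptor; the test then deletes the two snapshots by name. A minimal client-side equivalent of those calls, using only the public Admin API and shown purely for illustration (it is not the test code):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteTableAndSnapshotsExample {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // A table must be disabled before it can be deleted.
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table);
        }
        admin.deleteTable(table);
      }
      // Snapshots are deleted by name, independently of the table they were taken from.
      admin.deleteSnapshot("emptySnaptb0-testEmptyExportFileSystemState");
      admin.deleteSnapshot("snaptb0-testEmptyExportFileSystemState");
    }
  }
}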
2024-12-07T17:25:33,530 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1733592322206.3a2be2cac0a8dbe6615043afecd8922d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733592333529"}]},"ts":"9223372036854775807"} 2024-12-07T17:25:33,530 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1733592322206.c2df44b3304dd5a62bf9c3602aa3a526.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733592333529"}]},"ts":"9223372036854775807"} 2024-12-07T17:25:33,532 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-07T17:25:33,532 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 3a2be2cac0a8dbe6615043afecd8922d, NAME => 'testtb-testEmptyExportFileSystemState,,1733592322206.3a2be2cac0a8dbe6615043afecd8922d.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => c2df44b3304dd5a62bf9c3602aa3a526, NAME => 'testtb-testEmptyExportFileSystemState,1,1733592322206.c2df44b3304dd5a62bf9c3602aa3a526.', STARTKEY => '1', ENDKEY => ''}] 2024-12-07T17:25:33,532 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 2024-12-07T17:25:33,532 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733592333532"}]},"ts":"9223372036854775807"} 2024-12-07T17:25:33,534 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testEmptyExportFileSystemState state from META 2024-12-07T17:25:33,536 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=179, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-07T17:25:33,537 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 31 msec 2024-12-07T17:25:33,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-07T17:25:33,619 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 179 completed 2024-12-07T17:25:33,633 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testEmptyExportFileSystemState" 2024-12-07T17:25:33,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-12-07T17:25:33,636 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testEmptyExportFileSystemState" 2024-12-07T17:25:33,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-12-07T17:25:33,662 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=803 (was 789) Potentially hanging thread: process reaper (pid 3381) 
java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36531 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1173471231) connection to localhost/127.0.0.1:40703 from appattempt_1733592173099_0007_000001 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1614647307_22 at /127.0.0.1:51152 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2095817456_1 at /127.0.0.1:46108 [Waiting for operation #2] 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xeb8403e-shared-pool-41 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1173471231) connection to localhost/127.0.0.1:36531 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1614647307_22 at /127.0.0.1:45286 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1614647307_22 at /127.0.0.1:54920 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xeb8403e-shared-pool-42 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-5676 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2095817456_1 at /127.0.0.1:56336 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xeb8403e-shared-pool-40 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xeb8403e-shared-pool-43 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=805 (was 781) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=483 (was 498), ProcessCount=17 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=4502 (was 5177) 2024-12-07T17:25:33,662 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=803 is superior to 500 2024-12-07T17:25:33,697 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=803, OpenFileDescriptor=805, MaxFileDescriptor=1048576, SystemLoadAverage=483, ProcessCount=17, AvailableMemoryMB=4492 2024-12-07T17:25:33,697 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=803 is superior to 500 2024-12-07T17:25:33,699 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T17:25:33,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportWithChecksum 2024-12-07T17:25:33,701 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T17:25:33,701 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:25:33,701 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 180 2024-12-07T17:25:33,702 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T17:25:33,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-07T17:25:33,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742237_1413 (size=404) 2024-12-07T17:25:33,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742237_1413 (size=404) 2024-12-07T17:25:33,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742237_1413 (size=404) 2024-12-07T17:25:33,745 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => f625811ebdefbb45b189e2610d4c94f1, NAME => 'testtb-testExportWithChecksum,,1733592333698.f625811ebdefbb45b189e2610d4c94f1.', STARTKEY => '', ENDKEY => '1'}, 
tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:25:33,765 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 2a23f4978d5b815f844acd23908df010, NAME => 'testtb-testExportWithChecksum,1,1733592333698.2a23f4978d5b815f844acd23908df010.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:25:33,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742238_1414 (size=65) 2024-12-07T17:25:33,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742238_1414 (size=65) 2024-12-07T17:25:33,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742238_1414 (size=65) 2024-12-07T17:25:33,788 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,,1733592333698.f625811ebdefbb45b189e2610d4c94f1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:25:33,788 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1681): Closing f625811ebdefbb45b189e2610d4c94f1, disabling compactions & flushes 2024-12-07T17:25:33,788 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,,1733592333698.f625811ebdefbb45b189e2610d4c94f1. 2024-12-07T17:25:33,788 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,,1733592333698.f625811ebdefbb45b189e2610d4c94f1. 2024-12-07T17:25:33,788 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,,1733592333698.f625811ebdefbb45b189e2610d4c94f1. after waiting 0 ms 2024-12-07T17:25:33,788 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,,1733592333698.f625811ebdefbb45b189e2610d4c94f1. 
2024-12-07T17:25:33,788 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,,1733592333698.f625811ebdefbb45b189e2610d4c94f1. 2024-12-07T17:25:33,788 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1635): Region close journal for f625811ebdefbb45b189e2610d4c94f1: 2024-12-07T17:25:33,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-07T17:25:33,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742239_1415 (size=65) 2024-12-07T17:25:33,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742239_1415 (size=65) 2024-12-07T17:25:33,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742239_1415 (size=65) 2024-12-07T17:25:33,813 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,1,1733592333698.2a23f4978d5b815f844acd23908df010.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:25:33,813 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1681): Closing 2a23f4978d5b815f844acd23908df010, disabling compactions & flushes 2024-12-07T17:25:33,813 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,1,1733592333698.2a23f4978d5b815f844acd23908df010. 2024-12-07T17:25:33,813 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,1,1733592333698.2a23f4978d5b815f844acd23908df010. 2024-12-07T17:25:33,813 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,1,1733592333698.2a23f4978d5b815f844acd23908df010. after waiting 0 ms 2024-12-07T17:25:33,813 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,1,1733592333698.2a23f4978d5b815f844acd23908df010. 2024-12-07T17:25:33,813 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,1,1733592333698.2a23f4978d5b815f844acd23908df010. 
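The CreateTableProcedure for 'testtb-testExportWithChecksum' above lays out two regions (split at row key '1') with a single 'cf' family keeping one version. A hedged sketch of building an equivalent table with the TableDescriptorBuilder API is shown below; it mirrors the descriptor printed in the log but is not the code the test actually uses.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportWithChecksum"))
          .setRegionReplication(1)                                      // REGION_REPLICATION => '1'
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)                                        // VERSIONS => '1'
              .setBlocksize(65536)                                      // BLOCKSIZE => '65536'
              .build())
          .build();
      // One split key yields two regions, ('', '1') and ('1', ''), matching the log.
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(desc, splitKeys);
    }
  }
}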
2024-12-07T17:25:33,813 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1635): Region close journal for 2a23f4978d5b815f844acd23908df010: 2024-12-07T17:25:33,814 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T17:25:33,815 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1733592333698.f625811ebdefbb45b189e2610d4c94f1.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733592333814"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733592333814"}]},"ts":"1733592333814"} 2024-12-07T17:25:33,815 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1733592333698.2a23f4978d5b815f844acd23908df010.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733592333814"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733592333814"}]},"ts":"1733592333814"} 2024-12-07T17:25:33,818 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-07T17:25:33,818 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T17:25:33,819 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592333818"}]},"ts":"1733592333818"} 2024-12-07T17:25:33,820 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-12-07T17:25:33,824 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(202): Hosts are {5c70517e8267=0} racks are {/default-rack=0} 2024-12-07T17:25:33,825 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-07T17:25:33,825 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-07T17:25:33,825 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-07T17:25:33,825 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-07T17:25:33,825 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-07T17:25:33,825 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-07T17:25:33,826 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T17:25:33,826 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=f625811ebdefbb45b189e2610d4c94f1, ASSIGN}, {pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=2a23f4978d5b815f844acd23908df010, ASSIGN}] 2024-12-07T17:25:33,827 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure 
table=testtb-testExportWithChecksum, region=2a23f4978d5b815f844acd23908df010, ASSIGN 2024-12-07T17:25:33,827 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=f625811ebdefbb45b189e2610d4c94f1, ASSIGN 2024-12-07T17:25:33,828 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=f625811ebdefbb45b189e2610d4c94f1, ASSIGN; state=OFFLINE, location=5c70517e8267,41313,1733592165267; forceNewPlan=false, retain=false 2024-12-07T17:25:33,828 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=2a23f4978d5b815f844acd23908df010, ASSIGN; state=OFFLINE, location=5c70517e8267,36163,1733592165482; forceNewPlan=false, retain=false 2024-12-07T17:25:33,978 INFO [5c70517e8267:36895 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-07T17:25:33,979 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=181 updating hbase:meta row=f625811ebdefbb45b189e2610d4c94f1, regionState=OPENING, regionLocation=5c70517e8267,41313,1733592165267 2024-12-07T17:25:33,979 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=182 updating hbase:meta row=2a23f4978d5b815f844acd23908df010, regionState=OPENING, regionLocation=5c70517e8267,36163,1733592165482 2024-12-07T17:25:33,981 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=183, ppid=182, state=RUNNABLE; OpenRegionProcedure 2a23f4978d5b815f844acd23908df010, server=5c70517e8267,36163,1733592165482}] 2024-12-07T17:25:33,982 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=184, ppid=181, state=RUNNABLE; OpenRegionProcedure f625811ebdefbb45b189e2610d4c94f1, server=5c70517e8267,41313,1733592165267}] 2024-12-07T17:25:34,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-07T17:25:34,134 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5c70517e8267,36163,1733592165482 2024-12-07T17:25:34,134 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5c70517e8267,41313,1733592165267 2024-12-07T17:25:34,137 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(135): Open testtb-testExportWithChecksum,,1733592333698.f625811ebdefbb45b189e2610d4c94f1. 2024-12-07T17:25:34,137 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7285): Opening region: {ENCODED => f625811ebdefbb45b189e2610d4c94f1, NAME => 'testtb-testExportWithChecksum,,1733592333698.f625811ebdefbb45b189e2610d4c94f1.', STARTKEY => '', ENDKEY => '1'} 2024-12-07T17:25:34,138 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithChecksum,,1733592333698.f625811ebdefbb45b189e2610d4c94f1. 
service=AccessControlService 2024-12-07T17:25:34,138 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T17:25:34,138 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum f625811ebdefbb45b189e2610d4c94f1 2024-12-07T17:25:34,138 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,,1733592333698.f625811ebdefbb45b189e2610d4c94f1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:25:34,138 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7327): checking encryption for f625811ebdefbb45b189e2610d4c94f1 2024-12-07T17:25:34,138 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7330): checking classloading for f625811ebdefbb45b189e2610d4c94f1 2024-12-07T17:25:34,140 INFO [StoreOpener-f625811ebdefbb45b189e2610d4c94f1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f625811ebdefbb45b189e2610d4c94f1 2024-12-07T17:25:34,141 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(135): Open testtb-testExportWithChecksum,1,1733592333698.2a23f4978d5b815f844acd23908df010. 2024-12-07T17:25:34,141 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7285): Opening region: {ENCODED => 2a23f4978d5b815f844acd23908df010, NAME => 'testtb-testExportWithChecksum,1,1733592333698.2a23f4978d5b815f844acd23908df010.', STARTKEY => '1', ENDKEY => ''} 2024-12-07T17:25:34,141 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1733592333698.2a23f4978d5b815f844acd23908df010. service=AccessControlService 2024-12-07T17:25:34,141 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
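The "System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded" entries above come from the cluster having the AccessController coprocessor wired in before the regions open. A minimal sketch of the configuration that produces this, written as programmatic Configuration overrides (the same keys can go in hbase-site.xml); this illustrates the standard security settings and is not the test's actual setup code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.security.access.AccessController;

public class SecurityConf {
  public static Configuration secured() {
    Configuration conf = HBaseConfiguration.create();
    // Load the AccessController on the master, the regionservers and every
    // region, and turn authorization checks on. The priority=536870911 in the
    // log is simply the default system-coprocessor priority (Integer.MAX_VALUE / 4).
    conf.set("hbase.coprocessor.master.classes", AccessController.class.getName());
    conf.set("hbase.coprocessor.regionserver.classes", AccessController.class.getName());
    conf.set("hbase.coprocessor.region.classes", AccessController.class.getName());
    conf.setBoolean("hbase.security.authorization", true);
    return conf;
  }
}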
2024-12-07T17:25:34,141 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 2a23f4978d5b815f844acd23908df010 2024-12-07T17:25:34,142 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,1,1733592333698.2a23f4978d5b815f844acd23908df010.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:25:34,142 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7327): checking encryption for 2a23f4978d5b815f844acd23908df010 2024-12-07T17:25:34,142 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7330): checking classloading for 2a23f4978d5b815f844acd23908df010 2024-12-07T17:25:34,143 INFO [StoreOpener-f625811ebdefbb45b189e2610d4c94f1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f625811ebdefbb45b189e2610d4c94f1 columnFamilyName cf 2024-12-07T17:25:34,143 DEBUG [StoreOpener-f625811ebdefbb45b189e2610d4c94f1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:25:34,143 INFO [StoreOpener-f625811ebdefbb45b189e2610d4c94f1-1 {}] regionserver.HStore(327): Store=f625811ebdefbb45b189e2610d4c94f1/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:25:34,144 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/f625811ebdefbb45b189e2610d4c94f1 2024-12-07T17:25:34,144 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/f625811ebdefbb45b189e2610d4c94f1 2024-12-07T17:25:34,146 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1085): writing seq id for f625811ebdefbb45b189e2610d4c94f1 2024-12-07T17:25:34,148 INFO [StoreOpener-2a23f4978d5b815f844acd23908df010-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column 
family cf of region 2a23f4978d5b815f844acd23908df010 2024-12-07T17:25:34,149 INFO [StoreOpener-2a23f4978d5b815f844acd23908df010-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2a23f4978d5b815f844acd23908df010 columnFamilyName cf 2024-12-07T17:25:34,149 DEBUG [StoreOpener-2a23f4978d5b815f844acd23908df010-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:25:34,150 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/f625811ebdefbb45b189e2610d4c94f1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T17:25:34,151 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1102): Opened f625811ebdefbb45b189e2610d4c94f1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59374977, jitterRate=-0.11524389684200287}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T17:25:34,152 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1001): Region open journal for f625811ebdefbb45b189e2610d4c94f1: 2024-12-07T17:25:34,152 INFO [StoreOpener-2a23f4978d5b815f844acd23908df010-1 {}] regionserver.HStore(327): Store=2a23f4978d5b815f844acd23908df010/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:25:34,153 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithChecksum,,1733592333698.f625811ebdefbb45b189e2610d4c94f1., pid=184, masterSystemTime=1733592334134 2024-12-07T17:25:34,153 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/2a23f4978d5b815f844acd23908df010 2024-12-07T17:25:34,154 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/2a23f4978d5b815f844acd23908df010 2024-12-07T17:25:34,154 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2628): Finished post open deploy task for 
testtb-testExportWithChecksum,,1733592333698.f625811ebdefbb45b189e2610d4c94f1. 2024-12-07T17:25:34,155 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(164): Opened testtb-testExportWithChecksum,,1733592333698.f625811ebdefbb45b189e2610d4c94f1. 2024-12-07T17:25:34,155 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=181 updating hbase:meta row=f625811ebdefbb45b189e2610d4c94f1, regionState=OPEN, openSeqNum=2, regionLocation=5c70517e8267,41313,1733592165267 2024-12-07T17:25:34,157 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1085): writing seq id for 2a23f4978d5b815f844acd23908df010 2024-12-07T17:25:34,160 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=184, resume processing ppid=181 2024-12-07T17:25:34,160 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=184, ppid=181, state=SUCCESS; OpenRegionProcedure f625811ebdefbb45b189e2610d4c94f1, server=5c70517e8267,41313,1733592165267 in 175 msec 2024-12-07T17:25:34,161 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=181, ppid=180, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=f625811ebdefbb45b189e2610d4c94f1, ASSIGN in 333 msec 2024-12-07T17:25:34,164 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/2a23f4978d5b815f844acd23908df010/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T17:25:34,165 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1102): Opened 2a23f4978d5b815f844acd23908df010; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60183279, jitterRate=-0.10319925844669342}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T17:25:34,165 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1001): Region open journal for 2a23f4978d5b815f844acd23908df010: 2024-12-07T17:25:34,166 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithChecksum,1,1733592333698.2a23f4978d5b815f844acd23908df010., pid=183, masterSystemTime=1733592334134 2024-12-07T17:25:34,167 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithChecksum,1,1733592333698.2a23f4978d5b815f844acd23908df010. 2024-12-07T17:25:34,167 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(164): Opened testtb-testExportWithChecksum,1,1733592333698.2a23f4978d5b815f844acd23908df010. 
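At this point both regions of testtb-testExportWithChecksum have been opened by their regionservers (5c70517e8267,41313 and 5c70517e8267,36163). A client can observe the same assignment through RegionLocator; a minimal, self-contained sketch, assuming the client configuration points at this cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ShowAssignment {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(
             TableName.valueOf("testtb-testExportWithChecksum"))) {
      // One line per region: encoded name (e.g. f625811e...) and the
      // host,port,startcode of the regionserver it is assigned to.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}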
2024-12-07T17:25:34,168 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=182 updating hbase:meta row=2a23f4978d5b815f844acd23908df010, regionState=OPEN, openSeqNum=2, regionLocation=5c70517e8267,36163,1733592165482 2024-12-07T17:25:34,170 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=183, resume processing ppid=182 2024-12-07T17:25:34,170 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=183, ppid=182, state=SUCCESS; OpenRegionProcedure 2a23f4978d5b815f844acd23908df010, server=5c70517e8267,36163,1733592165482 in 188 msec 2024-12-07T17:25:34,172 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=182, resume processing ppid=180 2024-12-07T17:25:34,172 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=182, ppid=180, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=2a23f4978d5b815f844acd23908df010, ASSIGN in 344 msec 2024-12-07T17:25:34,173 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T17:25:34,173 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592334173"}]},"ts":"1733592334173"} 2024-12-07T17:25:34,174 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-12-07T17:25:34,176 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T17:25:34,177 DEBUG [PEWorker-3 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-12-07T17:25:34,180 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36163 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-07T17:25:34,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:25:34,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:25:34,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:25:34,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:25:34,185 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 
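The PermissionStorage/ZKPermissionWatcher entries above show the create-table post-operation writing the owner's RWXCA entry and every server refreshing its ACL cache from the /hbase/acl znode. Granting additional table permissions from a client goes through the same mechanism; a hedged sketch using AccessControlClient (the user name and actions are illustrative, and this call does not appear in the log itself):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTablePerms {
  // AccessControlClient.grant declares 'throws Throwable'.
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // Give 'someuser' read/write on the whole table (family and qualifier
      // left null); the acl table and ZooKeeper node are updated, which is
      // the same path the watchers above react to.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportWithChecksum"),
          "someuser", null, null,
          Permission.Action.READ, Permission.Action.WRITE);
    }
  }
}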
2024-12-07T17:25:34,185 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:34,185 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:34,185 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:34,185 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:34,185 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:34,186 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:34,186 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-07T17:25:34,187 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=180, state=SUCCESS; CreateTableProcedure table=testtb-testExportWithChecksum in 485 msec 2024-12-07T17:25:34,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-07T17:25:34,307 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum, procId: 180 completed 2024-12-07T17:25:34,307 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportWithChecksum get assigned. Timeout = 60000ms 2024-12-07T17:25:34,307 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:25:34,315 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportWithChecksum assigned to meta. Checking AM states. 2024-12-07T17:25:34,315 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:25:34,315 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportWithChecksum assigned. 
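The CreateTableProcedure that just finished (pid=180, in about 485 msec) is driven by a single client call. A minimal sketch of the equivalent Admin.createTable invocation for a table with one column family 'cf' and a single split key "1", which yields the two regions seen above ([ "", "1" ) and [ "1", "" )); this is illustrative and not the test's actual source:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateChecksumTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testExportWithChecksum");
      TableDescriptor desc = TableDescriptorBuilder.newBuilder(tn)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
          .build();
      // One split key pre-splits the table into the two regions that the
      // assignment procedures above then distribute across the regionservers.
      byte[][] splits = { Bytes.toBytes("1") };
      admin.createTable(desc, splits);
    }
  }
}

Admin.createTable blocks until the master-side procedure finishes, which is why the "Operation: CREATE ... procId: 180 completed" line above is only logged by the client-side table future after the procedure reports success.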
2024-12-07T17:25:34,327 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-07T17:25:34,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733592334327 (current time:1733592334327). 2024-12-07T17:25:34,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T17:25:34,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-07T17:25:34,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T17:25:34,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3437e227 to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8af537f 2024-12-07T17:25:34,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ff6eec7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:25:34,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:25:34,355 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45510, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:25:34,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3437e227 to 127.0.0.1:58133 2024-12-07T17:25:34,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:25:34,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x33606154 to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4126f8ec 2024-12-07T17:25:34,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@395ea6d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:25:34,376 DEBUG [hconnection-0x6d92b3c8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:25:34,378 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45516, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:25:34,379 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:25:34,380 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40926, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:25:34,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x33606154 to 127.0.0.1:58133 2024-12-07T17:25:34,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:25:34,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-07T17:25:34,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-07T17:25:34,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=185, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-07T17:25:34,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 185 2024-12-07T17:25:34,385 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T17:25:34,386 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T17:25:34,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-07T17:25:34,390 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T17:25:34,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742240_1416 (size=161) 2024-12-07T17:25:34,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742240_1416 (size=161) 2024-12-07T17:25:34,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742240_1416 (size=161) 2024-12-07T17:25:34,419 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T17:25:34,419 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE; SnapshotRegionProcedure f625811ebdefbb45b189e2610d4c94f1}, {pid=187, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 2a23f4978d5b815f844acd23908df010}] 2024-12-07T17:25:34,420 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=187, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 2a23f4978d5b815f844acd23908df010 2024-12-07T17:25:34,420 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=186, ppid=185, state=RUNNABLE; SnapshotRegionProcedure f625811ebdefbb45b189e2610d4c94f1 2024-12-07T17:25:34,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-07T17:25:34,572 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5c70517e8267,36163,1733592165482 2024-12-07T17:25:34,572 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5c70517e8267,41313,1733592165267 2024-12-07T17:25:34,573 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36163 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=187 2024-12-07T17:25:34,574 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1733592333698.2a23f4978d5b815f844acd23908df010. 2024-12-07T17:25:34,574 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2538): Flush status journal for 2a23f4978d5b815f844acd23908df010: 2024-12-07T17:25:34,574 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1733592333698.2a23f4978d5b815f844acd23908df010. for emptySnaptb0-testExportWithChecksum completed. 2024-12-07T17:25:34,574 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1733592333698.2a23f4978d5b815f844acd23908df010.' 
region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-07T17:25:34,574 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:25:34,575 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T17:25:34,575 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41313 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=186 2024-12-07T17:25:34,575 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1733592333698.f625811ebdefbb45b189e2610d4c94f1. 2024-12-07T17:25:34,575 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2538): Flush status journal for f625811ebdefbb45b189e2610d4c94f1: 2024-12-07T17:25:34,575 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1733592333698.f625811ebdefbb45b189e2610d4c94f1. for emptySnaptb0-testExportWithChecksum completed. 2024-12-07T17:25:34,575 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1733592333698.f625811ebdefbb45b189e2610d4c94f1.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-07T17:25:34,575 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:25:34,575 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T17:25:34,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742241_1417 (size=68) 2024-12-07T17:25:34,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742241_1417 (size=68) 2024-12-07T17:25:34,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742241_1417 (size=68) 2024-12-07T17:25:34,614 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1733592333698.2a23f4978d5b815f844acd23908df010. 
2024-12-07T17:25:34,614 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=187 2024-12-07T17:25:34,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=187 2024-12-07T17:25:34,615 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 2a23f4978d5b815f844acd23908df010 2024-12-07T17:25:34,615 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=187, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 2a23f4978d5b815f844acd23908df010 2024-12-07T17:25:34,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742242_1418 (size=68) 2024-12-07T17:25:34,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742242_1418 (size=68) 2024-12-07T17:25:34,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742242_1418 (size=68) 2024-12-07T17:25:34,618 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1733592333698.f625811ebdefbb45b189e2610d4c94f1. 2024-12-07T17:25:34,618 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=186 2024-12-07T17:25:34,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=186 2024-12-07T17:25:34,618 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=187, ppid=185, state=SUCCESS; SnapshotRegionProcedure 2a23f4978d5b815f844acd23908df010 in 198 msec 2024-12-07T17:25:34,619 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region f625811ebdefbb45b189e2610d4c94f1 2024-12-07T17:25:34,619 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=186, ppid=185, state=RUNNABLE; SnapshotRegionProcedure f625811ebdefbb45b189e2610d4c94f1 2024-12-07T17:25:34,620 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=186, resume processing ppid=185 2024-12-07T17:25:34,620 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T17:25:34,621 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=186, ppid=185, state=SUCCESS; SnapshotRegionProcedure f625811ebdefbb45b189e2610d4c94f1 in 200 msec 2024-12-07T17:25:34,621 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum 
table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T17:25:34,622 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T17:25:34,622 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-12-07T17:25:34,622 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-12-07T17:25:34,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742243_1419 (size=543) 2024-12-07T17:25:34,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742243_1419 (size=543) 2024-12-07T17:25:34,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742243_1419 (size=543) 2024-12-07T17:25:34,671 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T17:25:34,677 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T17:25:34,677 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-12-07T17:25:34,678 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T17:25:34,679 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 185 2024-12-07T17:25:34,680 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=185, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 296 msec 2024-12-07T17:25:34,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done 
pid=185 2024-12-07T17:25:34,689 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum, procId: 185 completed 2024-12-07T17:25:34,697 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41313 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithChecksum,,1733592333698.f625811ebdefbb45b189e2610d4c94f1. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T17:25:34,698 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36163 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithChecksum,1,1733592333698.2a23f4978d5b815f844acd23908df010. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T17:25:34,702 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportWithChecksum 2024-12-07T17:25:34,702 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportWithChecksum,,1733592333698.f625811ebdefbb45b189e2610d4c94f1. 2024-12-07T17:25:34,702 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:25:34,737 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-07T17:25:34,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733592334737 (current time:1733592334737). 2024-12-07T17:25:34,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T17:25:34,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-07T17:25:34,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T17:25:34,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x677b74fe to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1f0f972a 2024-12-07T17:25:34,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2751e59, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:25:34,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:25:34,751 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45522, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:25:34,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x677b74fe to 127.0.0.1:58133 2024-12-07T17:25:34,752 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:25:34,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6ea35d70 to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@df62149 2024-12-07T17:25:34,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27db6178, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:25:34,769 DEBUG [hconnection-0x6da843de-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:25:34,770 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45530, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:25:34,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:25:34,773 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40940, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:25:34,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6ea35d70 to 127.0.0.1:58133 2024-12-07T17:25:34,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:25:34,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-07T17:25:34,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
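The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." warnings a few lines above are what the regionserver logs when a client issues puts with write-ahead logging turned off. A minimal sketch of such a write (the row and value contents are made up for illustration; the test's actual data is not reproduced here):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPut {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testtb-testExportWithChecksum"))) {
      Put put = new Put(Bytes.toBytes("row-0001"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // SKIP_WAL trades durability for speed; it is what triggers the
      // "WAL disabled" warning on the serving regionserver.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}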
2024-12-07T17:25:34,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=188, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-07T17:25:34,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 188 2024-12-07T17:25:34,777 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T17:25:34,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188 2024-12-07T17:25:34,777 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T17:25:34,781 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T17:25:34,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742244_1420 (size=156) 2024-12-07T17:25:34,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742244_1420 (size=156) 2024-12-07T17:25:34,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742244_1420 (size=156) 2024-12-07T17:25:34,795 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T17:25:34,795 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=189, ppid=188, state=RUNNABLE; SnapshotRegionProcedure f625811ebdefbb45b189e2610d4c94f1}, {pid=190, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 2a23f4978d5b815f844acd23908df010}] 2024-12-07T17:25:34,796 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=190, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 2a23f4978d5b815f844acd23908df010 2024-12-07T17:25:34,796 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=189, ppid=188, state=RUNNABLE; SnapshotRegionProcedure f625811ebdefbb45b189e2610d4c94f1 2024-12-07T17:25:34,860 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: 
RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-07T17:25:34,860 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer 2024-12-07T17:25:34,861 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-07T17:25:34,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188 2024-12-07T17:25:34,947 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5c70517e8267,41313,1733592165267 2024-12-07T17:25:34,947 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5c70517e8267,36163,1733592165482 2024-12-07T17:25:34,948 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36163 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=190 2024-12-07T17:25:34,948 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41313 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=189 2024-12-07T17:25:34,948 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1733592333698.f625811ebdefbb45b189e2610d4c94f1. 2024-12-07T17:25:34,948 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1733592333698.2a23f4978d5b815f844acd23908df010. 
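Both snapshot requests in this run (emptySnaptb0-testExportWithChecksum before the writes, snaptb0-testExportWithChecksum after them) are FLUSH-type snapshots: the SnapshotRegionCallable work starting here flushes each region's memstore and then references the resulting hfiles in the snapshot manifest. From the client side, each request is a single Admin.snapshot call; a minimal sketch, not the test's actual code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // "type=FLUSH" in the master log corresponds to SnapshotType.FLUSH; the
      // call blocks while the client polls the master, which is what produces
      // the repeated "Checking to see if procedure is done" lines.
      admin.snapshot("snaptb0-testExportWithChecksum",
          TableName.valueOf("testtb-testExportWithChecksum"),
          SnapshotType.FLUSH);
    }
  }
}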
2024-12-07T17:25:34,949 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(2837): Flushing 2a23f4978d5b815f844acd23908df010 1/1 column families, dataSize=2.87 KB heapSize=6.44 KB 2024-12-07T17:25:34,949 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(2837): Flushing f625811ebdefbb45b189e2610d4c94f1 1/1 column families, dataSize=400 B heapSize=1.09 KB 2024-12-07T17:25:34,975 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/f625811ebdefbb45b189e2610d4c94f1/.tmp/cf/cae54eab1cbd401ba51d692732f507aa is 71, key is 01854a3ab8c432e88555002023e170e3/cf:q/1733592334696/Put/seqid=0 2024-12-07T17:25:34,979 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/2a23f4978d5b815f844acd23908df010/.tmp/cf/ab72d64fd3d644368c6d49615acf9ecf is 71, key is 18d23962c726f20eef334f803d734deb/cf:q/1733592334698/Put/seqid=0 2024-12-07T17:25:35,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742245_1421 (size=5492) 2024-12-07T17:25:35,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742245_1421 (size=5492) 2024-12-07T17:25:35,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742245_1421 (size=5492) 2024-12-07T17:25:35,006 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=400 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/f625811ebdefbb45b189e2610d4c94f1/.tmp/cf/cae54eab1cbd401ba51d692732f507aa 2024-12-07T17:25:35,013 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/f625811ebdefbb45b189e2610d4c94f1/.tmp/cf/cae54eab1cbd401ba51d692732f507aa as hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/f625811ebdefbb45b189e2610d4c94f1/cf/cae54eab1cbd401ba51d692732f507aa 2024-12-07T17:25:35,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742246_1422 (size=8120) 2024-12-07T17:25:35,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742246_1422 (size=8120) 2024-12-07T17:25:35,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742246_1422 (size=8120) 2024-12-07T17:25:35,017 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.87 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/2a23f4978d5b815f844acd23908df010/.tmp/cf/ab72d64fd3d644368c6d49615acf9ecf 2024-12-07T17:25:35,020 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/f625811ebdefbb45b189e2610d4c94f1/cf/cae54eab1cbd401ba51d692732f507aa, entries=6, sequenceid=6, filesize=5.4 K 2024-12-07T17:25:35,021 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(3040): Finished flush of dataSize ~400 B/400, heapSize ~1.08 KB/1104, currentSize=0 B/0 for f625811ebdefbb45b189e2610d4c94f1 in 73ms, sequenceid=6, compaction requested=false 2024-12-07T17:25:35,021 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-12-07T17:25:35,022 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(2538): Flush status journal for f625811ebdefbb45b189e2610d4c94f1: 2024-12-07T17:25:35,022 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1733592333698.f625811ebdefbb45b189e2610d4c94f1. for snaptb0-testExportWithChecksum completed. 2024-12-07T17:25:35,022 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1733592333698.f625811ebdefbb45b189e2610d4c94f1.' 
region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-07T17:25:35,022 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:25:35,022 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/f625811ebdefbb45b189e2610d4c94f1/cf/cae54eab1cbd401ba51d692732f507aa] hfiles 2024-12-07T17:25:35,022 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/f625811ebdefbb45b189e2610d4c94f1/cf/cae54eab1cbd401ba51d692732f507aa for snapshot=snaptb0-testExportWithChecksum 2024-12-07T17:25:35,023 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/2a23f4978d5b815f844acd23908df010/.tmp/cf/ab72d64fd3d644368c6d49615acf9ecf as hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/2a23f4978d5b815f844acd23908df010/cf/ab72d64fd3d644368c6d49615acf9ecf 2024-12-07T17:25:35,028 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/2a23f4978d5b815f844acd23908df010/cf/ab72d64fd3d644368c6d49615acf9ecf, entries=44, sequenceid=6, filesize=7.9 K 2024-12-07T17:25:35,028 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(3040): Finished flush of dataSize ~2.87 KB/2936, heapSize ~6.42 KB/6576, currentSize=0 B/0 for 2a23f4978d5b815f844acd23908df010 in 80ms, sequenceid=6, compaction requested=false 2024-12-07T17:25:35,029 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(2538): Flush status journal for 2a23f4978d5b815f844acd23908df010: 2024-12-07T17:25:35,029 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1733592333698.2a23f4978d5b815f844acd23908df010. for snaptb0-testExportWithChecksum completed. 2024-12-07T17:25:35,029 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1733592333698.2a23f4978d5b815f844acd23908df010.' 
region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-07T17:25:35,029 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:25:35,029 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/2a23f4978d5b815f844acd23908df010/cf/ab72d64fd3d644368c6d49615acf9ecf] hfiles 2024-12-07T17:25:35,029 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/2a23f4978d5b815f844acd23908df010/cf/ab72d64fd3d644368c6d49615acf9ecf for snapshot=snaptb0-testExportWithChecksum 2024-12-07T17:25:35,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742248_1424 (size=107) 2024-12-07T17:25:35,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742248_1424 (size=107) 2024-12-07T17:25:35,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742248_1424 (size=107) 2024-12-07T17:25:35,063 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1733592333698.2a23f4978d5b815f844acd23908df010. 
2024-12-07T17:25:35,063 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=190 2024-12-07T17:25:35,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=190 2024-12-07T17:25:35,064 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 2a23f4978d5b815f844acd23908df010 2024-12-07T17:25:35,064 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=190, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 2a23f4978d5b815f844acd23908df010 2024-12-07T17:25:35,067 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=190, ppid=188, state=SUCCESS; SnapshotRegionProcedure 2a23f4978d5b815f844acd23908df010 in 270 msec 2024-12-07T17:25:35,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188 2024-12-07T17:25:35,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742247_1423 (size=107) 2024-12-07T17:25:35,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742247_1423 (size=107) 2024-12-07T17:25:35,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742247_1423 (size=107) 2024-12-07T17:25:35,089 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1733592333698.f625811ebdefbb45b189e2610d4c94f1. 
2024-12-07T17:25:35,090 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=189 2024-12-07T17:25:35,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=189 2024-12-07T17:25:35,090 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region f625811ebdefbb45b189e2610d4c94f1 2024-12-07T17:25:35,090 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=189, ppid=188, state=RUNNABLE; SnapshotRegionProcedure f625811ebdefbb45b189e2610d4c94f1 2024-12-07T17:25:35,093 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=189, resume processing ppid=188 2024-12-07T17:25:35,093 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=189, ppid=188, state=SUCCESS; SnapshotRegionProcedure f625811ebdefbb45b189e2610d4c94f1 in 296 msec 2024-12-07T17:25:35,093 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T17:25:35,094 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T17:25:35,094 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T17:25:35,094 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum 2024-12-07T17:25:35,096 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-07T17:25:35,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742249_1425 (size=621) 2024-12-07T17:25:35,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742249_1425 (size=621) 2024-12-07T17:25:35,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742249_1425 (size=621) 2024-12-07T17:25:35,124 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T17:25:35,130 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=188, 
state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T17:25:35,131 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-07T17:25:35,132 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T17:25:35,132 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 188 2024-12-07T17:25:35,147 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=188, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 357 msec 2024-12-07T17:25:35,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188 2024-12-07T17:25:35,381 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum, procId: 188 completed 2024-12-07T17:25:35,382 INFO [Time-limited test {}] snapshot.TestExportSnapshot(476): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/local-export-1733592335382 2024-12-07T17:25:35,382 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/local-export-1733592335382, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/local-export-1733592335382, srcFsUri=hdfs://localhost:33765, srcDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:25:35,418 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:33765, inputRoot=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:25:35,418 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=org.apache.hadoop.fs.LocalFileSystem@74070ae7, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/local-export-1733592335382, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/local-export-1733592335382/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-07T17:25:35,420 INFO 
[Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-07T17:25:35,423 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/local-export-1733592335382/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-07T17:25:35,458 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-07T17:25:35,458 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-07T17:25:35,458 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-07T17:25:35,459 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-07T17:25:36,632 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop-7349396745386837869.jar 2024-12-07T17:25:36,632 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-07T17:25:36,632 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-07T17:25:36,706 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop-5033002744331548978.jar 2024-12-07T17:25:36,707 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-07T17:25:36,707 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-07T17:25:36,707 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-07T17:25:36,708 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-07T17:25:36,708 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-07T17:25:36,708 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-07T17:25:36,708 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-07T17:25:36,709 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-07T17:25:36,709 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-07T17:25:36,709 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-07T17:25:36,709 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-07T17:25:36,709 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-07T17:25:36,710 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-07T17:25:36,710 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-07T17:25:36,710 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-07T17:25:36,710 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-07T17:25:36,710 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-07T17:25:36,711 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-07T17:25:36,711 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:25:36,711 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:25:36,711 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T17:25:36,712 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:25:36,712 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:25:36,712 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T17:25:36,712 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T17:25:36,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742250_1426 (size=127628) 2024-12-07T17:25:36,795 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742250_1426 (size=127628) 2024-12-07T17:25:36,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742250_1426 (size=127628) 2024-12-07T17:25:36,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742251_1427 (size=2172101) 2024-12-07T17:25:36,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742251_1427 (size=2172101) 2024-12-07T17:25:36,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742251_1427 (size=2172101) 2024-12-07T17:25:36,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742252_1428 (size=213228) 2024-12-07T17:25:36,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742252_1428 (size=213228) 2024-12-07T17:25:36,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742252_1428 (size=213228) 2024-12-07T17:25:36,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742253_1429 (size=1877034) 2024-12-07T17:25:36,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742253_1429 (size=1877034) 2024-12-07T17:25:36,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742253_1429 (size=1877034) 2024-12-07T17:25:36,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742254_1430 (size=533455) 2024-12-07T17:25:36,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742254_1430 (size=533455) 2024-12-07T17:25:36,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742254_1430 (size=533455) 2024-12-07T17:25:36,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742255_1431 (size=7280644) 2024-12-07T17:25:36,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742255_1431 (size=7280644) 2024-12-07T17:25:36,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742255_1431 (size=7280644) 2024-12-07T17:25:36,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742256_1432 (size=4188619) 2024-12-07T17:25:36,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742256_1432 (size=4188619) 2024-12-07T17:25:36,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742256_1432 (size=4188619) 2024-12-07T17:25:36,955 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742257_1433 (size=20406) 2024-12-07T17:25:36,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742257_1433 (size=20406) 2024-12-07T17:25:36,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742257_1433 (size=20406) 2024-12-07T17:25:36,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742258_1434 (size=75495) 2024-12-07T17:25:36,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742258_1434 (size=75495) 2024-12-07T17:25:36,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742258_1434 (size=75495) 2024-12-07T17:25:36,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742259_1435 (size=45609) 2024-12-07T17:25:36,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742259_1435 (size=45609) 2024-12-07T17:25:36,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742259_1435 (size=45609) 2024-12-07T17:25:37,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742260_1436 (size=110084) 2024-12-07T17:25:37,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742260_1436 (size=110084) 2024-12-07T17:25:37,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742260_1436 (size=110084) 2024-12-07T17:25:37,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742261_1437 (size=1323991) 2024-12-07T17:25:37,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742261_1437 (size=1323991) 2024-12-07T17:25:37,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742261_1437 (size=1323991) 2024-12-07T17:25:37,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742262_1438 (size=23076) 2024-12-07T17:25:37,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742262_1438 (size=23076) 2024-12-07T17:25:37,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742262_1438 (size=23076) 2024-12-07T17:25:37,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742263_1439 (size=126803) 2024-12-07T17:25:37,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742263_1439 (size=126803) 2024-12-07T17:25:37,149 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742263_1439 (size=126803) 2024-12-07T17:25:37,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742264_1440 (size=322274) 2024-12-07T17:25:37,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742264_1440 (size=322274) 2024-12-07T17:25:37,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742264_1440 (size=322274) 2024-12-07T17:25:37,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742265_1441 (size=1832290) 2024-12-07T17:25:37,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742265_1441 (size=1832290) 2024-12-07T17:25:37,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742265_1441 (size=1832290) 2024-12-07T17:25:37,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742266_1442 (size=30081) 2024-12-07T17:25:37,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742266_1442 (size=30081) 2024-12-07T17:25:37,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742266_1442 (size=30081) 2024-12-07T17:25:37,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742267_1443 (size=53616) 2024-12-07T17:25:37,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742267_1443 (size=53616) 2024-12-07T17:25:37,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742267_1443 (size=53616) 2024-12-07T17:25:37,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742268_1444 (size=29229) 2024-12-07T17:25:37,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742268_1444 (size=29229) 2024-12-07T17:25:37,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742268_1444 (size=29229) 2024-12-07T17:25:37,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742269_1445 (size=169089) 2024-12-07T17:25:37,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742269_1445 (size=169089) 2024-12-07T17:25:37,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742269_1445 (size=169089) 2024-12-07T17:25:37,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742270_1446 (size=451756) 2024-12-07T17:25:37,376 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742270_1446 (size=451756) 2024-12-07T17:25:37,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742270_1446 (size=451756) 2024-12-07T17:25:37,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742271_1447 (size=5175431) 2024-12-07T17:25:37,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742271_1447 (size=5175431) 2024-12-07T17:25:37,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742271_1447 (size=5175431) 2024-12-07T17:25:37,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742272_1448 (size=136454) 2024-12-07T17:25:37,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742272_1448 (size=136454) 2024-12-07T17:25:37,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742272_1448 (size=136454) 2024-12-07T17:25:37,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742273_1449 (size=6350154) 2024-12-07T17:25:37,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742273_1449 (size=6350154) 2024-12-07T17:25:37,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742273_1449 (size=6350154) 2024-12-07T17:25:37,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742274_1450 (size=907853) 2024-12-07T17:25:37,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742274_1450 (size=907853) 2024-12-07T17:25:37,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742274_1450 (size=907853) 2024-12-07T17:25:37,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742275_1451 (size=3317408) 2024-12-07T17:25:37,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742275_1451 (size=3317408) 2024-12-07T17:25:37,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742275_1451 (size=3317408) 2024-12-07T17:25:37,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742276_1452 (size=503880) 2024-12-07T17:25:37,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742276_1452 (size=503880) 2024-12-07T17:25:37,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742276_1452 (size=503880) 
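The `TableMapReduceUtil(923): For class ..., using jar ...` entries earlier in this run show the dependency-jar resolution ExportSnapshot performs before submitting its MapReduce copy job; the block additions that follow are those jars landing in HDFS for the job's distributed cache. A minimal sketch of that mechanism, assuming a hypothetical job name and using only the public `TableMapReduceUtil.addDependencyJars(Job)` API (not necessarily the exact call path the test takes):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical job name; in the log this is the ExportSnapshot copy job.
    Job job = Job.getInstance(conf, "snapshot-export-example");
    // Resolves the jar that contains each class the job needs and registers it
    // with the job, which is the kind of call that emits the
    // "For class X, using jar Y" DEBUG lines seen above.
    TableMapReduceUtil.addDependencyJars(job);
  }
}
```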
2024-12-07T17:25:37,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742277_1453 (size=4695811) 2024-12-07T17:25:37,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742277_1453 (size=4695811) 2024-12-07T17:25:37,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742277_1453 (size=4695811) 2024-12-07T17:25:37,651 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-07T17:25:37,654 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-07T17:25:37,656 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-07T17:25:37,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742278_1454 (size=338) 2024-12-07T17:25:37,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742278_1454 (size=338) 2024-12-07T17:25:37,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742278_1454 (size=338) 2024-12-07T17:25:37,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742279_1455 (size=15) 2024-12-07T17:25:37,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742279_1455 (size=15) 2024-12-07T17:25:37,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742279_1455 (size=15) 2024-12-07T17:25:37,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742280_1456 (size=304929) 2024-12-07T17:25:37,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742280_1456 (size=304929) 2024-12-07T17:25:37,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742280_1456 (size=304929) 2024-12-07T17:25:37,869 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-07T17:25:37,869 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-07T17:25:37,881 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0007_000001 (auth:SIMPLE) from 127.0.0.1:40868 2024-12-07T17:25:37,895 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-1_1/usercache/jenkins/appcache/application_1733592173099_0007/container_1733592173099_0007_01_000001/launch_container.sh] 2024-12-07T17:25:37,896 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-1_1/usercache/jenkins/appcache/application_1733592173099_0007/container_1733592173099_0007_01_000001/container_tokens] 2024-12-07T17:25:37,896 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-1_1/usercache/jenkins/appcache/application_1733592173099_0007/container_1733592173099_0007_01_000001/sysfs] 2024-12-07T17:25:37,969 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0008_000001 (auth:SIMPLE) from 127.0.0.1:46290 2024-12-07T17:25:38,905 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T17:25:43,250 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
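The `capacity.AbstractLeafQueue` warnings above mean the queue's `maximum-am-resource-percent` is too small to admit even one ApplicationMaster, so enforcement is skipped. A minimal sketch of raising that limit, assuming the standard CapacityScheduler property name and an illustrative value of 0.5 (the shipped default is 0.1); where this configuration is actually applied in the mini cluster is not shown in the log:

```java
import org.apache.hadoop.conf.Configuration;

public class AmResourceLimitExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Allow ApplicationMasters to use up to 50% of queue resources instead of
    // the 10% default; the value here is illustrative only.
    conf.setFloat("yarn.scheduler.capacity.maximum-am-resource-percent", 0.5f);
    System.out.println(conf.get("yarn.scheduler.capacity.maximum-am-resource-percent"));
  }
}
```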
2024-12-07T17:25:43,277 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0008_000001 (auth:SIMPLE) from 127.0.0.1:50528 2024-12-07T17:25:43,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742281_1457 (size=350603) 2024-12-07T17:25:43,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742281_1457 (size=350603) 2024-12-07T17:25:43,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742281_1457 (size=350603) 2024-12-07T17:25:45,522 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0008_000001 (auth:SIMPLE) from 127.0.0.1:51080 2024-12-07T17:25:48,313 DEBUG [master/5c70517e8267:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 95a53dd0f0fa93ab4e6bebd2e0c2e9c5 changed from -1.0 to 0.0, refreshing cache 2024-12-07T17:25:48,313 DEBUG [master/5c70517e8267:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 2a23f4978d5b815f844acd23908df010 changed from -1.0 to 0.0, refreshing cache 2024-12-07T17:25:48,313 DEBUG [master/5c70517e8267:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 0138678f1a360d7573c5cfe5f7d0a7e0 changed from -1.0 to 0.0, refreshing cache 2024-12-07T17:25:48,313 DEBUG [master/5c70517e8267:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region f625811ebdefbb45b189e2610d4c94f1 changed from -1.0 to 0.0, refreshing cache 2024-12-07T17:25:50,287 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-1_3/usercache/jenkins/appcache/application_1733592173099_0008/container_1733592173099_0008_01_000002/launch_container.sh] 2024-12-07T17:25:50,287 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-1_3/usercache/jenkins/appcache/application_1733592173099_0008/container_1733592173099_0008_01_000002/container_tokens] 2024-12-07T17:25:50,287 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-1_3/usercache/jenkins/appcache/application_1733592173099_0008/container_1733592173099_0008_01_000002/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/2a23f4978d5b815f844acd23908df010/cf/ab72d64fd3d644368c6d49615acf9ecf and 
file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/local-export-1733592335382/archive/data/default/testtb-testExportWithChecksum/2a23f4978d5b815f844acd23908df010/cf/ab72d64fd3d644368c6d49615acf9ecf. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:596) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:332) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:254) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:180) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-07T17:25:51,398 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0008_000001 (auth:SIMPLE) from 127.0.0.1:35674 2024-12-07T17:25:54,741 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_1/usercache/jenkins/appcache/application_1733592173099_0008/container_1733592173099_0008_01_000003/launch_container.sh] 2024-12-07T17:25:54,741 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_1/usercache/jenkins/appcache/application_1733592173099_0008/container_1733592173099_0008_01_000003/container_tokens] 2024-12-07T17:25:54,742 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_1/usercache/jenkins/appcache/application_1733592173099_0008/container_1733592173099_0008_01_000003/sysfs] Error: java.io.IOException: Checksum mismatch between 
hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/2a23f4978d5b815f844acd23908df010/cf/ab72d64fd3d644368c6d49615acf9ecf and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/local-export-1733592335382/archive/data/default/testtb-testExportWithChecksum/2a23f4978d5b815f844acd23908df010/cf/ab72d64fd3d644368c6d49615acf9ecf. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:596) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:332) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:254) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:180) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-07T17:25:56,103 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 0138678f1a360d7573c5cfe5f7d0a7e0, had cached 0 bytes from a total of 8394 2024-12-07T17:25:56,103 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 95a53dd0f0fa93ab4e6bebd2e0c2e9c5, had cached 0 bytes from a total of 5216 2024-12-07T17:25:56,415 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0008_000001 (auth:SIMPLE) from 127.0.0.1:44010 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/2a23f4978d5b815f844acd23908df010/cf/ab72d64fd3d644368c6d49615acf9ecf and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/local-export-1733592335382/archive/data/default/testtb-testExportWithChecksum/2a23f4978d5b815f844acd23908df010/cf/ab72d64fd3d644368c6d49615acf9ecf. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:596) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:332) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:254) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:180) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-07T17:25:59,894 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_1/usercache/jenkins/appcache/application_1733592173099_0008/container_1733592173099_0008_01_000004/launch_container.sh] 2024-12-07T17:25:59,894 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_1/usercache/jenkins/appcache/application_1733592173099_0008/container_1733592173099_0008_01_000004/container_tokens] 2024-12-07T17:25:59,894 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_1/usercache/jenkins/appcache/application_1733592173099_0008/container_1733592173099_0008_01_000004/sysfs] 2024-12-07T17:26:01,439 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0008_000001 (auth:SIMPLE) from 127.0.0.1:44020 2024-12-07T17:26:04,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742282_1458 (size=21340) 2024-12-07T17:26:04,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742282_1458 (size=21340) 2024-12-07T17:26:04,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742282_1458 (size=21340) 2024-12-07T17:26:04,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742283_1459 (size=460) 2024-12-07T17:26:04,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742283_1459 (size=460) 2024-12-07T17:26:04,734 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742283_1459 (size=460) 2024-12-07T17:26:04,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742284_1460 (size=21340) 2024-12-07T17:26:04,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742284_1460 (size=21340) 2024-12-07T17:26:04,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742284_1460 (size=21340) 2024-12-07T17:26:04,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742285_1461 (size=350603) 2024-12-07T17:26:04,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742285_1461 (size=350603) 2024-12-07T17:26:04,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742285_1461 (size=350603) 2024-12-07T17:26:04,782 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_0/usercache/jenkins/appcache/application_1733592173099_0008/container_1733592173099_0008_01_000005/launch_container.sh] 2024-12-07T17:26:04,782 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_0/usercache/jenkins/appcache/application_1733592173099_0008/container_1733592173099_0008_01_000005/container_tokens] 2024-12-07T17:26:04,782 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_0/usercache/jenkins/appcache/application_1733592173099_0008/container_1733592173099_0008_01_000005/sysfs] 2024-12-07T17:26:04,792 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0008_000001 (auth:SIMPLE) from 127.0.0.1:39528 2024-12-07T17:26:05,944 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1227): Snapshot export failed org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1733592173099_0008_m_000000 Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0 at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:935) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1204) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:151) ~[hbase-common-2.7.0-SNAPSHOT.jar:?] 
at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:523) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:353) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:237) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T17:26:05,945 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592365945 2024-12-07T17:26:05,945 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:33765, tgtDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592365945, rawTgtDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592365945, srcFsUri=hdfs://localhost:33765, srcDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:26:05,989 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:33765, inputRoot=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:26:05,989 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1614647307_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592365945, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592365945/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-07T17:26:05,993 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-07T17:26:06,001 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592365945/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-07T17:26:06,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742286_1462 (size=156) 2024-12-07T17:26:06,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742286_1462 (size=156) 2024-12-07T17:26:06,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742286_1462 (size=156) 2024-12-07T17:26:06,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742287_1463 (size=621) 2024-12-07T17:26:06,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742287_1463 (size=621) 2024-12-07T17:26:06,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742287_1463 (size=621) 2024-12-07T17:26:06,057 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-07T17:26:06,058 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-07T17:26:06,058 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-07T17:26:06,059 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-07T17:26:07,254 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop-13313415640441892059.jar 2024-12-07T17:26:07,254 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-07T17:26:07,255 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-07T17:26:07,352 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop-1626963104301195103.jar 2024-12-07T17:26:07,352 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-07T17:26:07,353 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-07T17:26:07,353 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-07T17:26:07,353 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-07T17:26:07,353 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-07T17:26:07,354 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 
2024-12-07T17:26:07,354 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-07T17:26:07,354 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-07T17:26:07,354 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-07T17:26:07,354 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-07T17:26:07,355 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-07T17:26:07,355 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-07T17:26:07,355 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-07T17:26:07,355 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-07T17:26:07,355 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-07T17:26:07,355 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-07T17:26:07,356 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-07T17:26:07,356 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-07T17:26:07,356 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:26:07,356 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:26:07,357 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T17:26:07,357 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:26:07,357 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:26:07,357 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T17:26:07,357 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T17:26:07,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742288_1464 (size=127628) 2024-12-07T17:26:07,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742288_1464 (size=127628) 2024-12-07T17:26:07,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742288_1464 (size=127628) 2024-12-07T17:26:07,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742289_1465 (size=2172101) 2024-12-07T17:26:07,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742289_1465 (size=2172101) 2024-12-07T17:26:07,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742289_1465 (size=2172101) 2024-12-07T17:26:07,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is 
added to blk_1073742290_1466 (size=6350154) 2024-12-07T17:26:07,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742290_1466 (size=6350154) 2024-12-07T17:26:07,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742290_1466 (size=6350154) 2024-12-07T17:26:07,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742291_1467 (size=213228) 2024-12-07T17:26:07,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742291_1467 (size=213228) 2024-12-07T17:26:07,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742291_1467 (size=213228) 2024-12-07T17:26:07,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742292_1468 (size=1877034) 2024-12-07T17:26:07,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742292_1468 (size=1877034) 2024-12-07T17:26:07,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742292_1468 (size=1877034) 2024-12-07T17:26:07,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742293_1469 (size=533455) 2024-12-07T17:26:07,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742293_1469 (size=533455) 2024-12-07T17:26:07,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742293_1469 (size=533455) 2024-12-07T17:26:07,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742294_1470 (size=7280644) 2024-12-07T17:26:07,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742294_1470 (size=7280644) 2024-12-07T17:26:07,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742294_1470 (size=7280644) 2024-12-07T17:26:07,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742295_1471 (size=4188619) 2024-12-07T17:26:07,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742295_1471 (size=4188619) 2024-12-07T17:26:07,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742295_1471 (size=4188619) 2024-12-07T17:26:07,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742296_1472 (size=20406) 2024-12-07T17:26:07,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742296_1472 (size=20406) 2024-12-07T17:26:07,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:45837 is added to blk_1073742296_1472 (size=20406) 2024-12-07T17:26:07,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742297_1473 (size=75495) 2024-12-07T17:26:07,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742297_1473 (size=75495) 2024-12-07T17:26:07,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742297_1473 (size=75495) 2024-12-07T17:26:07,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742298_1474 (size=45609) 2024-12-07T17:26:07,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742298_1474 (size=45609) 2024-12-07T17:26:07,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742298_1474 (size=45609) 2024-12-07T17:26:08,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742299_1475 (size=110084) 2024-12-07T17:26:08,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742299_1475 (size=110084) 2024-12-07T17:26:08,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742299_1475 (size=110084) 2024-12-07T17:26:08,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742300_1476 (size=1323991) 2024-12-07T17:26:08,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742300_1476 (size=1323991) 2024-12-07T17:26:08,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742300_1476 (size=1323991) 2024-12-07T17:26:08,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742301_1477 (size=23076) 2024-12-07T17:26:08,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742301_1477 (size=23076) 2024-12-07T17:26:08,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742301_1477 (size=23076) 2024-12-07T17:26:08,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742302_1478 (size=126803) 2024-12-07T17:26:08,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742302_1478 (size=126803) 2024-12-07T17:26:08,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742302_1478 (size=126803) 2024-12-07T17:26:08,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742303_1479 (size=322274) 2024-12-07T17:26:08,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:45837 is added to blk_1073742303_1479 (size=322274) 2024-12-07T17:26:08,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742303_1479 (size=322274) 2024-12-07T17:26:08,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742304_1480 (size=1832290) 2024-12-07T17:26:08,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742304_1480 (size=1832290) 2024-12-07T17:26:08,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742304_1480 (size=1832290) 2024-12-07T17:26:08,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742305_1481 (size=30081) 2024-12-07T17:26:08,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742305_1481 (size=30081) 2024-12-07T17:26:08,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742305_1481 (size=30081) 2024-12-07T17:26:08,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742306_1482 (size=53616) 2024-12-07T17:26:08,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742306_1482 (size=53616) 2024-12-07T17:26:08,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742306_1482 (size=53616) 2024-12-07T17:26:08,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742307_1483 (size=29229) 2024-12-07T17:26:08,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742307_1483 (size=29229) 2024-12-07T17:26:08,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742307_1483 (size=29229) 2024-12-07T17:26:08,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742308_1484 (size=169089) 2024-12-07T17:26:08,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742308_1484 (size=169089) 2024-12-07T17:26:08,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742308_1484 (size=169089) 2024-12-07T17:26:08,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742309_1485 (size=5175431) 2024-12-07T17:26:08,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742309_1485 (size=5175431) 2024-12-07T17:26:08,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742309_1485 (size=5175431) 2024-12-07T17:26:08,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:33165 is added to blk_1073742310_1486 (size=136454) 2024-12-07T17:26:08,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742310_1486 (size=136454) 2024-12-07T17:26:08,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742310_1486 (size=136454) 2024-12-07T17:26:08,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742311_1487 (size=451756) 2024-12-07T17:26:08,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742311_1487 (size=451756) 2024-12-07T17:26:08,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742311_1487 (size=451756) 2024-12-07T17:26:08,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742312_1488 (size=907853) 2024-12-07T17:26:08,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742312_1488 (size=907853) 2024-12-07T17:26:08,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742312_1488 (size=907853) 2024-12-07T17:26:08,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742313_1489 (size=3317408) 2024-12-07T17:26:08,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742313_1489 (size=3317408) 2024-12-07T17:26:08,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742313_1489 (size=3317408) 2024-12-07T17:26:08,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742314_1490 (size=503880) 2024-12-07T17:26:08,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742314_1490 (size=503880) 2024-12-07T17:26:08,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742314_1490 (size=503880) 2024-12-07T17:26:08,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742315_1491 (size=4695811) 2024-12-07T17:26:08,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742315_1491 (size=4695811) 2024-12-07T17:26:08,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742315_1491 (size=4695811) 2024-12-07T17:26:08,214 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-07T17:26:08,217 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-07T17:26:08,219 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-07T17:26:08,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742316_1492 (size=338) 2024-12-07T17:26:08,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742316_1492 (size=338) 2024-12-07T17:26:08,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742316_1492 (size=338) 2024-12-07T17:26:08,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742317_1493 (size=15) 2024-12-07T17:26:08,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742317_1493 (size=15) 2024-12-07T17:26:08,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742317_1493 (size=15) 2024-12-07T17:26:08,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742318_1494 (size=304881) 2024-12-07T17:26:08,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742318_1494 (size=304881) 2024-12-07T17:26:08,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742318_1494 (size=304881) 2024-12-07T17:26:10,878 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-07T17:26:10,878 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-07T17:26:10,885 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0008_000001 (auth:SIMPLE) from 127.0.0.1:39536 2024-12-07T17:26:10,899 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_2/usercache/jenkins/appcache/application_1733592173099_0008/container_1733592173099_0008_01_000001/launch_container.sh] 2024-12-07T17:26:10,899 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_2/usercache/jenkins/appcache/application_1733592173099_0008/container_1733592173099_0008_01_000001/container_tokens] 2024-12-07T17:26:10,899 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_2/usercache/jenkins/appcache/application_1733592173099_0008/container_1733592173099_0008_01_000001/sysfs] 2024-12-07T17:26:11,281 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0009_000001 (auth:SIMPLE) from 127.0.0.1:60298 2024-12-07T17:26:13,251 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-07T17:26:16,696 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0009_000001 (auth:SIMPLE) from 127.0.0.1:41752 2024-12-07T17:26:16,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742319_1495 (size=350555) 2024-12-07T17:26:16,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742319_1495 (size=350555) 2024-12-07T17:26:16,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742319_1495 (size=350555) 2024-12-07T17:26:18,921 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0009_000001 (auth:SIMPLE) from 127.0.0.1:40324 2024-12-07T17:26:19,138 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region f625811ebdefbb45b189e2610d4c94f1, had cached 0 bytes from a total of 5492 2024-12-07T17:26:19,142 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 2a23f4978d5b815f844acd23908df010, had cached 0 bytes from a total of 8120 2024-12-07T17:26:23,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742320_1496 (size=8120) 2024-12-07T17:26:23,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742320_1496 (size=8120) 2024-12-07T17:26:23,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742320_1496 (size=8120) 2024-12-07T17:26:23,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742321_1497 (size=5492) 2024-12-07T17:26:23,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742321_1497 (size=5492) 2024-12-07T17:26:23,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742321_1497 (size=5492) 2024-12-07T17:26:23,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742322_1498 (size=17413) 2024-12-07T17:26:23,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742322_1498 (size=17413) 2024-12-07T17:26:23,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742322_1498 (size=17413) 2024-12-07T17:26:23,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742323_1499 (size=462) 2024-12-07T17:26:23,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742323_1499 (size=462) 2024-12-07T17:26:23,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742323_1499 (size=462) 2024-12-07T17:26:23,700 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for 
path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_0/usercache/jenkins/appcache/application_1733592173099_0009/container_1733592173099_0009_01_000002/launch_container.sh] 2024-12-07T17:26:23,700 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_0/usercache/jenkins/appcache/application_1733592173099_0009/container_1733592173099_0009_01_000002/container_tokens] 2024-12-07T17:26:23,700 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_0/usercache/jenkins/appcache/application_1733592173099_0009/container_1733592173099_0009_01_000002/sysfs] 2024-12-07T17:26:23,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742324_1500 (size=17413) 2024-12-07T17:26:23,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742324_1500 (size=17413) 2024-12-07T17:26:23,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742324_1500 (size=17413) 2024-12-07T17:26:23,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742325_1501 (size=350555) 2024-12-07T17:26:23,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742325_1501 (size=350555) 2024-12-07T17:26:23,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742325_1501 (size=350555) 2024-12-07T17:26:23,755 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0009_000001 (auth:SIMPLE) from 127.0.0.1:46400 2024-12-07T17:26:25,414 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-07T17:26:25,415 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
2024-12-07T17:26:25,420 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportWithChecksum 2024-12-07T17:26:25,420 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-07T17:26:25,421 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-07T17:26:25,421 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1614647307_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-07T17:26:25,421 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-07T17:26:25,421 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-07T17:26:25,421 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1614647307_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592365945/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592365945/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-07T17:26:25,422 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592365945/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-07T17:26:25,422 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592365945/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-07T17:26:25,427 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportWithChecksum 2024-12-07T17:26:25,428 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportWithChecksum 2024-12-07T17:26:25,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=191, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportWithChecksum 2024-12-07T17:26:25,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-07T17:26:25,430 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592385430"}]},"ts":"1733592385430"} 2024-12-07T17:26:25,431 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-12-07T17:26:25,433 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set 
testtb-testExportWithChecksum to state=DISABLING 2024-12-07T17:26:25,434 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=192, ppid=191, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-12-07T17:26:25,435 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=193, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=f625811ebdefbb45b189e2610d4c94f1, UNASSIGN}, {pid=194, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=2a23f4978d5b815f844acd23908df010, UNASSIGN}] 2024-12-07T17:26:25,435 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=194, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=2a23f4978d5b815f844acd23908df010, UNASSIGN 2024-12-07T17:26:25,435 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=193, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=f625811ebdefbb45b189e2610d4c94f1, UNASSIGN 2024-12-07T17:26:25,436 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=194 updating hbase:meta row=2a23f4978d5b815f844acd23908df010, regionState=CLOSING, regionLocation=5c70517e8267,36163,1733592165482 2024-12-07T17:26:25,436 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=193 updating hbase:meta row=f625811ebdefbb45b189e2610d4c94f1, regionState=CLOSING, regionLocation=5c70517e8267,41313,1733592165267 2024-12-07T17:26:25,437 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T17:26:25,437 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=195, ppid=194, state=RUNNABLE; CloseRegionProcedure 2a23f4978d5b815f844acd23908df010, server=5c70517e8267,36163,1733592165482}] 2024-12-07T17:26:25,437 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T17:26:25,437 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=196, ppid=193, state=RUNNABLE; CloseRegionProcedure f625811ebdefbb45b189e2610d4c94f1, server=5c70517e8267,41313,1733592165267}] 2024-12-07T17:26:25,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-07T17:26:25,588 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5c70517e8267,41313,1733592165267 2024-12-07T17:26:25,588 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5c70517e8267,36163,1733592165482 2024-12-07T17:26:25,588 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(124): Close f625811ebdefbb45b189e2610d4c94f1 2024-12-07T17:26:25,588 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] handler.UnassignRegionHandler(124): Close 2a23f4978d5b815f844acd23908df010 2024-12-07T17:26:25,589 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] 
handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T17:26:25,589 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T17:26:25,589 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1681): Closing 2a23f4978d5b815f844acd23908df010, disabling compactions & flushes 2024-12-07T17:26:25,589 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1681): Closing f625811ebdefbb45b189e2610d4c94f1, disabling compactions & flushes 2024-12-07T17:26:25,589 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,1,1733592333698.2a23f4978d5b815f844acd23908df010. 2024-12-07T17:26:25,589 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,,1733592333698.f625811ebdefbb45b189e2610d4c94f1. 2024-12-07T17:26:25,589 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,1,1733592333698.2a23f4978d5b815f844acd23908df010. 2024-12-07T17:26:25,589 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,,1733592333698.f625811ebdefbb45b189e2610d4c94f1. 2024-12-07T17:26:25,589 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,1,1733592333698.2a23f4978d5b815f844acd23908df010. after waiting 0 ms 2024-12-07T17:26:25,589 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,1,1733592333698.2a23f4978d5b815f844acd23908df010. 2024-12-07T17:26:25,589 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,,1733592333698.f625811ebdefbb45b189e2610d4c94f1. after waiting 0 ms 2024-12-07T17:26:25,589 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,,1733592333698.f625811ebdefbb45b189e2610d4c94f1. 
2024-12-07T17:26:25,593 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/2a23f4978d5b815f844acd23908df010/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T17:26:25,593 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/f625811ebdefbb45b189e2610d4c94f1/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T17:26:25,593 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:26:25,593 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:26:25,593 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,1,1733592333698.2a23f4978d5b815f844acd23908df010. 2024-12-07T17:26:25,593 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1635): Region close journal for 2a23f4978d5b815f844acd23908df010: 2024-12-07T17:26:25,593 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,,1733592333698.f625811ebdefbb45b189e2610d4c94f1. 
2024-12-07T17:26:25,593 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1635): Region close journal for f625811ebdefbb45b189e2610d4c94f1: 2024-12-07T17:26:25,595 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] handler.UnassignRegionHandler(170): Closed 2a23f4978d5b815f844acd23908df010 2024-12-07T17:26:25,595 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=194 updating hbase:meta row=2a23f4978d5b815f844acd23908df010, regionState=CLOSED 2024-12-07T17:26:25,595 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(170): Closed f625811ebdefbb45b189e2610d4c94f1 2024-12-07T17:26:25,595 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=193 updating hbase:meta row=f625811ebdefbb45b189e2610d4c94f1, regionState=CLOSED 2024-12-07T17:26:25,597 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=195, resume processing ppid=194 2024-12-07T17:26:25,598 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=195, ppid=194, state=SUCCESS; CloseRegionProcedure 2a23f4978d5b815f844acd23908df010, server=5c70517e8267,36163,1733592165482 in 159 msec 2024-12-07T17:26:25,598 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=196, resume processing ppid=193 2024-12-07T17:26:25,598 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=194, ppid=192, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=2a23f4978d5b815f844acd23908df010, UNASSIGN in 162 msec 2024-12-07T17:26:25,599 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=196, ppid=193, state=SUCCESS; CloseRegionProcedure f625811ebdefbb45b189e2610d4c94f1, server=5c70517e8267,41313,1733592165267 in 160 msec 2024-12-07T17:26:25,599 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=193, resume processing ppid=192 2024-12-07T17:26:25,599 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=193, ppid=192, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=f625811ebdefbb45b189e2610d4c94f1, UNASSIGN in 163 msec 2024-12-07T17:26:25,601 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=192, resume processing ppid=191 2024-12-07T17:26:25,601 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=192, ppid=191, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 166 msec 2024-12-07T17:26:25,602 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592385602"}]},"ts":"1733592385602"} 2024-12-07T17:26:25,603 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-12-07T17:26:25,604 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED 2024-12-07T17:26:25,606 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=191, state=SUCCESS; DisableTableProcedure table=testtb-testExportWithChecksum in 177 msec 2024-12-07T17:26:25,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-07T17:26:25,731 
INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum, procId: 191 completed 2024-12-07T17:26:25,732 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportWithChecksum 2024-12-07T17:26:25,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=197, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-07T17:26:25,733 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=197, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-07T17:26:25,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportWithChecksum 2024-12-07T17:26:25,734 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=197, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-07T17:26:25,735 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36163 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 2024-12-07T17:26:25,737 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/f625811ebdefbb45b189e2610d4c94f1 2024-12-07T17:26:25,737 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/2a23f4978d5b815f844acd23908df010 2024-12-07T17:26:25,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-07T17:26:25,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-07T17:26:25,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-07T17:26:25,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-07T17:26:25,738 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-07T17:26:25,738 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-07T17:26:25,739 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportWithChecksum with data PBUF 2024-12-07T17:26:25,739 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-07T17:26:25,739 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/2a23f4978d5b815f844acd23908df010/cf, FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/2a23f4978d5b815f844acd23908df010/recovered.edits] 2024-12-07T17:26:25,739 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/f625811ebdefbb45b189e2610d4c94f1/cf, FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/f625811ebdefbb45b189e2610d4c94f1/recovered.edits] 2024-12-07T17:26:25,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-07T17:26:25,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-07T17:26:25,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:26:25,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-07T17:26:25,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:26:25,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:26:25,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-07T17:26:25,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:26:25,741 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 
\x03 \x04 2024-12-07T17:26:25,741 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:26:25,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=197 2024-12-07T17:26:25,741 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:26:25,741 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:26:25,744 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/2a23f4978d5b815f844acd23908df010/cf/ab72d64fd3d644368c6d49615acf9ecf to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportWithChecksum/2a23f4978d5b815f844acd23908df010/cf/ab72d64fd3d644368c6d49615acf9ecf 2024-12-07T17:26:25,744 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/f625811ebdefbb45b189e2610d4c94f1/cf/cae54eab1cbd401ba51d692732f507aa to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportWithChecksum/f625811ebdefbb45b189e2610d4c94f1/cf/cae54eab1cbd401ba51d692732f507aa 2024-12-07T17:26:25,746 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/f625811ebdefbb45b189e2610d4c94f1/recovered.edits/9.seqid to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportWithChecksum/f625811ebdefbb45b189e2610d4c94f1/recovered.edits/9.seqid 2024-12-07T17:26:25,746 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/2a23f4978d5b815f844acd23908df010/recovered.edits/9.seqid to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportWithChecksum/2a23f4978d5b815f844acd23908df010/recovered.edits/9.seqid 2024-12-07T17:26:25,747 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/f625811ebdefbb45b189e2610d4c94f1 2024-12-07T17:26:25,747 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportWithChecksum/2a23f4978d5b815f844acd23908df010 2024-12-07T17:26:25,747 DEBUG [PEWorker-2 {}] 
procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions 2024-12-07T17:26:25,749 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=197, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-07T17:26:25,750 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-12-07T17:26:25,752 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithChecksum' descriptor. 2024-12-07T17:26:25,753 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=197, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-07T17:26:25,753 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithChecksum' from region states. 2024-12-07T17:26:25,753 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1733592333698.f625811ebdefbb45b189e2610d4c94f1.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733592385753"}]},"ts":"9223372036854775807"} 2024-12-07T17:26:25,753 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1733592333698.2a23f4978d5b815f844acd23908df010.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733592385753"}]},"ts":"9223372036854775807"} 2024-12-07T17:26:25,755 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-07T17:26:25,755 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => f625811ebdefbb45b189e2610d4c94f1, NAME => 'testtb-testExportWithChecksum,,1733592333698.f625811ebdefbb45b189e2610d4c94f1.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 2a23f4978d5b815f844acd23908df010, NAME => 'testtb-testExportWithChecksum,1,1733592333698.2a23f4978d5b815f844acd23908df010.', STARTKEY => '1', ENDKEY => ''}] 2024-12-07T17:26:25,755 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithChecksum' as deleted. 
2024-12-07T17:26:25,755 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733592385755"}]},"ts":"9223372036854775807"} 2024-12-07T17:26:25,756 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportWithChecksum state from META 2024-12-07T17:26:25,758 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=197, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-07T17:26:25,759 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=197, state=SUCCESS; DeleteTableProcedure table=testtb-testExportWithChecksum in 26 msec 2024-12-07T17:26:25,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=197 2024-12-07T17:26:25,842 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum, procId: 197 completed 2024-12-07T17:26:25,847 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithChecksum" 2024-12-07T17:26:25,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-12-07T17:26:25,850 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithChecksum" 2024-12-07T17:26:25,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportWithChecksum 2024-12-07T17:26:25,871 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=798 (was 803), OpenFileDescriptor=801 (was 805), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=421 (was 483), ProcessCount=17 (was 17), AvailableMemoryMB=3933 (was 4492) 2024-12-07T17:26:25,871 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=798 is superior to 500 2024-12-07T17:26:25,888 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=798, OpenFileDescriptor=801, MaxFileDescriptor=1048576, SystemLoadAverage=421, ProcessCount=17, AvailableMemoryMB=3932 2024-12-07T17:26:25,888 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=798 is superior to 500 2024-12-07T17:26:25,889 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T17:26:25,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=198, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 
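The entries up to this point show the previous test's cleanup: DISABLE (procId 191) and DELETE (procId 197) of testtb-testExportWithChecksum complete, and the two snapshots emptySnaptb0-/snaptb0-testExportWithChecksum are deleted. A minimal sketch of the client-side Admin calls that such entries correspond to is shown below, assuming the standard HBase 2.x Java client; the test's own code is not part of this log, and the class name here is only a placeholder.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ChecksumExportTeardownSketch {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportWithChecksum");

      // "Operation: DISABLE ... procId: 191 completed" -> DisableTableProcedure on the master
      admin.disableTable(table);
      // "Operation: DELETE ... procId: 197 completed" -> DeleteTableProcedure
      // (archives HFiles/recovered.edits, removes the meta rows, drops the ACL znode)
      admin.deleteTable(table);

      // 'delete name: "emptySnaptb0-testExportWithChecksum"' and the snaptb0 counterpart
      admin.deleteSnapshot("emptySnaptb0-testExportWithChecksum");
      admin.deleteSnapshot("snaptb0-testExportWithChecksum");
    }
  }
}
```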
2024-12-07T17:26:25,891 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T17:26:25,891 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:26:25,891 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSkipTmp" procId is: 198 2024-12-07T17:26:25,891 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T17:26:25,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-07T17:26:25,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742326_1502 (size=418) 2024-12-07T17:26:25,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742326_1502 (size=418) 2024-12-07T17:26:25,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742326_1502 (size=418) 2024-12-07T17:26:25,899 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 14cffa573abf99bf822272c643bd41e1, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733592385889.14cffa573abf99bf822272c643bd41e1.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:26:25,899 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 5a0752b327a26effa3328009df4dd771, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733592385889.5a0752b327a26effa3328009df4dd771.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:26:25,912 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742327_1503 (size=79) 2024-12-07T17:26:25,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742327_1503 (size=79) 2024-12-07T17:26:25,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742327_1503 (size=79) 2024-12-07T17:26:25,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742328_1504 (size=79) 2024-12-07T17:26:25,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742328_1504 (size=79) 2024-12-07T17:26:25,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742328_1504 (size=79) 2024-12-07T17:26:25,914 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1733592385889.5a0752b327a26effa3328009df4dd771.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:26:25,914 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1681): Closing 5a0752b327a26effa3328009df4dd771, disabling compactions & flushes 2024-12-07T17:26:25,914 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1733592385889.5a0752b327a26effa3328009df4dd771. 2024-12-07T17:26:25,914 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733592385889.5a0752b327a26effa3328009df4dd771. 2024-12-07T17:26:25,914 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733592385889.5a0752b327a26effa3328009df4dd771. after waiting 0 ms 2024-12-07T17:26:25,914 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1733592385889.5a0752b327a26effa3328009df4dd771. 2024-12-07T17:26:25,914 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1733592385889.5a0752b327a26effa3328009df4dd771. 
2024-12-07T17:26:25,914 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1635): Region close journal for 5a0752b327a26effa3328009df4dd771: 2024-12-07T17:26:25,916 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1733592385889.14cffa573abf99bf822272c643bd41e1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:26:25,916 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1681): Closing 14cffa573abf99bf822272c643bd41e1, disabling compactions & flushes 2024-12-07T17:26:25,916 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1733592385889.14cffa573abf99bf822272c643bd41e1. 2024-12-07T17:26:25,916 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733592385889.14cffa573abf99bf822272c643bd41e1. 2024-12-07T17:26:25,916 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733592385889.14cffa573abf99bf822272c643bd41e1. after waiting 0 ms 2024-12-07T17:26:25,916 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1733592385889.14cffa573abf99bf822272c643bd41e1. 2024-12-07T17:26:25,916 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,,1733592385889.14cffa573abf99bf822272c643bd41e1. 2024-12-07T17:26:25,916 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1635): Region close journal for 14cffa573abf99bf822272c643bd41e1: 2024-12-07T17:26:25,917 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T17:26:25,917 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1733592385889.5a0752b327a26effa3328009df4dd771.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1733592385917"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733592385917"}]},"ts":"1733592385917"} 2024-12-07T17:26:25,917 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1733592385889.14cffa573abf99bf822272c643bd41e1.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1733592385917"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733592385917"}]},"ts":"1733592385917"} 2024-12-07T17:26:25,919 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 
2024-12-07T17:26:25,920 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T17:26:25,920 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592385920"}]},"ts":"1733592385920"} 2024-12-07T17:26:25,921 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-12-07T17:26:25,924 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(202): Hosts are {5c70517e8267=0} racks are {/default-rack=0} 2024-12-07T17:26:25,926 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-07T17:26:25,926 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-07T17:26:25,926 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-07T17:26:25,926 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-07T17:26:25,926 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-07T17:26:25,926 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-07T17:26:25,926 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T17:26:25,926 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=14cffa573abf99bf822272c643bd41e1, ASSIGN}, {pid=200, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=5a0752b327a26effa3328009df4dd771, ASSIGN}] 2024-12-07T17:26:25,927 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=200, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=5a0752b327a26effa3328009df4dd771, ASSIGN 2024-12-07T17:26:25,927 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=14cffa573abf99bf822272c643bd41e1, ASSIGN 2024-12-07T17:26:25,927 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=14cffa573abf99bf822272c643bd41e1, ASSIGN; state=OFFLINE, location=5c70517e8267,36163,1733592165482; forceNewPlan=false, retain=false 2024-12-07T17:26:25,927 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=200, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=5a0752b327a26effa3328009df4dd771, ASSIGN; state=OFFLINE, location=5c70517e8267,41313,1733592165267; forceNewPlan=false, retain=false 
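CreateTableProcedure pid=198 has now written the filesystem layout, added the two regions ('' to '1' and '1' to '') to hbase:meta, and initialized the ASSIGN subprocedures. A hedged sketch of an equivalent client-side create call follows, using the 'cf' family and VERSIONS => '1' from the descriptor above and a single split key of "1"; other attributes are left at their defaults, and the class name is a placeholder, not the test's actual setup code.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipTmpTableSetupSketch {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");

      // One 'cf' family with a single version, matching the logged descriptor; defaults elsewhere.
      TableDescriptor desc = TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .build())
          .build();

      // A single split key "1" produces the two regions seen in meta: ''..'1' and '1'..''.
      admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
    }
  }
}
```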
2024-12-07T17:26:25,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-07T17:26:26,078 INFO [5c70517e8267:36895 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-07T17:26:26,078 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=199 updating hbase:meta row=14cffa573abf99bf822272c643bd41e1, regionState=OPENING, regionLocation=5c70517e8267,36163,1733592165482 2024-12-07T17:26:26,078 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=200 updating hbase:meta row=5a0752b327a26effa3328009df4dd771, regionState=OPENING, regionLocation=5c70517e8267,41313,1733592165267 2024-12-07T17:26:26,080 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=201, ppid=199, state=RUNNABLE; OpenRegionProcedure 14cffa573abf99bf822272c643bd41e1, server=5c70517e8267,36163,1733592165482}] 2024-12-07T17:26:26,080 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=202, ppid=200, state=RUNNABLE; OpenRegionProcedure 5a0752b327a26effa3328009df4dd771, server=5c70517e8267,41313,1733592165267}] 2024-12-07T17:26:26,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-07T17:26:26,231 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5c70517e8267,36163,1733592165482 2024-12-07T17:26:26,232 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5c70517e8267,41313,1733592165267 2024-12-07T17:26:26,234 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithSkipTmp,,1733592385889.14cffa573abf99bf822272c643bd41e1. 2024-12-07T17:26:26,234 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7285): Opening region: {ENCODED => 14cffa573abf99bf822272c643bd41e1, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733592385889.14cffa573abf99bf822272c643bd41e1.', STARTKEY => '', ENDKEY => '1'} 2024-12-07T17:26:26,234 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithSkipTmp,1,1733592385889.5a0752b327a26effa3328009df4dd771. 2024-12-07T17:26:26,235 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7285): Opening region: {ENCODED => 5a0752b327a26effa3328009df4dd771, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733592385889.5a0752b327a26effa3328009df4dd771.', STARTKEY => '1', ENDKEY => ''} 2024-12-07T17:26:26,235 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1733592385889.14cffa573abf99bf822272c643bd41e1. service=AccessControlService 2024-12-07T17:26:26,235 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1733592385889.5a0752b327a26effa3328009df4dd771. 
service=AccessControlService 2024-12-07T17:26:26,235 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T17:26:26,235 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T17:26:26,235 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 14cffa573abf99bf822272c643bd41e1 2024-12-07T17:26:26,235 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 5a0752b327a26effa3328009df4dd771 2024-12-07T17:26:26,235 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1733592385889.5a0752b327a26effa3328009df4dd771.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:26:26,235 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1733592385889.14cffa573abf99bf822272c643bd41e1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T17:26:26,235 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7327): checking encryption for 14cffa573abf99bf822272c643bd41e1 2024-12-07T17:26:26,235 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7327): checking encryption for 5a0752b327a26effa3328009df4dd771 2024-12-07T17:26:26,235 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7330): checking classloading for 14cffa573abf99bf822272c643bd41e1 2024-12-07T17:26:26,235 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7330): checking classloading for 5a0752b327a26effa3328009df4dd771 2024-12-07T17:26:26,237 INFO [StoreOpener-14cffa573abf99bf822272c643bd41e1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 14cffa573abf99bf822272c643bd41e1 2024-12-07T17:26:26,238 INFO [StoreOpener-14cffa573abf99bf822272c643bd41e1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 14cffa573abf99bf822272c643bd41e1 columnFamilyName cf 2024-12-07T17:26:26,238 DEBUG [StoreOpener-14cffa573abf99bf822272c643bd41e1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:26:26,239 INFO [StoreOpener-14cffa573abf99bf822272c643bd41e1-1 {}] regionserver.HStore(327): Store=14cffa573abf99bf822272c643bd41e1/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:26:26,240 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithSkipTmp/14cffa573abf99bf822272c643bd41e1 2024-12-07T17:26:26,240 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithSkipTmp/14cffa573abf99bf822272c643bd41e1 2024-12-07T17:26:26,242 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1085): writing seq id for 14cffa573abf99bf822272c643bd41e1 2024-12-07T17:26:26,244 INFO [StoreOpener-5a0752b327a26effa3328009df4dd771-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5a0752b327a26effa3328009df4dd771 2024-12-07T17:26:26,244 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithSkipTmp/14cffa573abf99bf822272c643bd41e1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T17:26:26,245 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1102): Opened 14cffa573abf99bf822272c643bd41e1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67242717, jitterRate=0.0019945651292800903}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T17:26:26,245 INFO [StoreOpener-5a0752b327a26effa3328009df4dd771-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5a0752b327a26effa3328009df4dd771 columnFamilyName cf 2024-12-07T17:26:26,245 DEBUG [StoreOpener-5a0752b327a26effa3328009df4dd771-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T17:26:26,246 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1001): Region open journal for 14cffa573abf99bf822272c643bd41e1: 2024-12-07T17:26:26,246 INFO [StoreOpener-5a0752b327a26effa3328009df4dd771-1 {}] regionserver.HStore(327): Store=5a0752b327a26effa3328009df4dd771/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T17:26:26,246 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1733592385889.14cffa573abf99bf822272c643bd41e1., pid=201, masterSystemTime=1733592386231 2024-12-07T17:26:26,246 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithSkipTmp/5a0752b327a26effa3328009df4dd771 2024-12-07T17:26:26,247 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithSkipTmp/5a0752b327a26effa3328009df4dd771 2024-12-07T17:26:26,248 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1733592385889.14cffa573abf99bf822272c643bd41e1. 2024-12-07T17:26:26,248 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithSkipTmp,,1733592385889.14cffa573abf99bf822272c643bd41e1. 
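The CompactionConfiguration lines printed while each 'cf' store opens (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, minCompactSize 128 MB) appear to be the stock defaults. For reference, the sketch below shows the standard configuration keys those values correspond to; this is illustrative only, and nothing in the log suggests the test overrides them.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Keys behind the values logged by compactions.CompactionConfiguration(181):
    conf.setInt("hbase.hstore.compaction.min", 3);         // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);        // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);  // ratio
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize (128 MB)
    System.out.println("ratio = " + conf.get("hbase.hstore.compaction.ratio"));
  }
}
```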
2024-12-07T17:26:26,248 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=199 updating hbase:meta row=14cffa573abf99bf822272c643bd41e1, regionState=OPEN, openSeqNum=2, regionLocation=5c70517e8267,36163,1733592165482 2024-12-07T17:26:26,249 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1085): writing seq id for 5a0752b327a26effa3328009df4dd771 2024-12-07T17:26:26,251 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=201, resume processing ppid=199 2024-12-07T17:26:26,252 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=201, ppid=199, state=SUCCESS; OpenRegionProcedure 14cffa573abf99bf822272c643bd41e1, server=5c70517e8267,36163,1733592165482 in 171 msec 2024-12-07T17:26:26,252 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=199, ppid=198, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=14cffa573abf99bf822272c643bd41e1, ASSIGN in 325 msec 2024-12-07T17:26:26,257 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithSkipTmp/5a0752b327a26effa3328009df4dd771/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T17:26:26,257 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1102): Opened 5a0752b327a26effa3328009df4dd771; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62510049, jitterRate=-0.06852768361568451}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T17:26:26,257 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1001): Region open journal for 5a0752b327a26effa3328009df4dd771: 2024-12-07T17:26:26,258 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1733592385889.5a0752b327a26effa3328009df4dd771., pid=202, masterSystemTime=1733592386232 2024-12-07T17:26:26,259 DEBUG [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1733592385889.5a0752b327a26effa3328009df4dd771. 2024-12-07T17:26:26,259 INFO [RS_OPEN_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1733592385889.5a0752b327a26effa3328009df4dd771. 
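Both regions of testtb-testExportFileSystemStateWithSkipTmp have now been opened, 14cffa573abf99bf822272c643bd41e1 on 5c70517e8267,36163 and 5a0752b327a26effa3328009df4dd771 on 5c70517e8267,41313. A small sketch of how a client could confirm that layout through the RegionLocator API is shown below; it is not the verification the test itself performs.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLayoutCheckSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = connection.getRegionLocator(table)) {
      // Expect two locations: startKey '' -> '1' and '1' -> '', each with a hosting server.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getRegionNameAsString() + " on " + loc.getServerName());
      }
    }
  }
}
```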
2024-12-07T17:26:26,259 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=200 updating hbase:meta row=5a0752b327a26effa3328009df4dd771, regionState=OPEN, openSeqNum=2, regionLocation=5c70517e8267,41313,1733592165267 2024-12-07T17:26:26,261 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=202, resume processing ppid=200 2024-12-07T17:26:26,262 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=202, ppid=200, state=SUCCESS; OpenRegionProcedure 5a0752b327a26effa3328009df4dd771, server=5c70517e8267,41313,1733592165267 in 180 msec 2024-12-07T17:26:26,263 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=200, resume processing ppid=198 2024-12-07T17:26:26,263 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=200, ppid=198, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=5a0752b327a26effa3328009df4dd771, ASSIGN in 336 msec 2024-12-07T17:26:26,263 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T17:26:26,263 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592386263"}]},"ts":"1733592386263"} 2024-12-07T17:26:26,264 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-12-07T17:26:26,267 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T17:26:26,267 DEBUG [PEWorker-4 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA 2024-12-07T17:26:26,269 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36163 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-07T17:26:26,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:26:26,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:26:26,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:26:26,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:26:26,278 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data 
PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:26:26,279 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:26:26,279 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:26:26,279 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-07T17:26:26,279 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-07T17:26:26,279 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-07T17:26:26,279 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:26:26,279 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-07T17:26:26,280 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=198, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 390 msec 2024-12-07T17:26:26,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-07T17:26:26,494 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 198 completed 2024-12-07T17:26:26,494 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportFileSystemStateWithSkipTmp get assigned. Timeout = 60000ms 2024-12-07T17:26:26,494 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:26:26,497 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38423 {}] regionserver.StoreScanner(1133): Switch to stream read (scanned=32795 bytes) of info 2024-12-07T17:26:26,502 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned to meta. Checking AM states. 
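The PermissionStorage and zk-permission-watcher entries above show a "jenkins: RWXCA" ACL entry being stored for the new table and pushed to every region server's cache. The log alone does not show whether this comes from an explicit grant or from the AccessController granting the table creator automatically; as an assumption-laden sketch, an explicit grant through AccessControlClient would look roughly like the following.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class AclGrantSketch {
  public static void main(String[] args) throws Throwable {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // Table-wide grant of R, W, X, C, A to user "jenkins" (family/qualifier left null).
      AccessControlClient.grant(connection,
          TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}
```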
2024-12-07T17:26:26,502 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:26:26,502 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned. 2024-12-07T17:26:26,504 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-07T17:26:26,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733592386504 (current time:1733592386504). 2024-12-07T17:26:26,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T17:26:26,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-07T17:26:26,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T17:26:26,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x110573e1 to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@43dbf4e3 2024-12-07T17:26:26,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53afdac8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:26:26,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:26:26,514 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42366, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:26:26,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x110573e1 to 127.0.0.1:58133 2024-12-07T17:26:26,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:26:26,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6edf9c3f to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@723f01eb 2024-12-07T17:26:26,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5febfcfe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:26:26,527 DEBUG [hconnection-0x7002868e-metaLookup-shared--pool-0 {}] 
ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:26:26,528 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42380, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:26:26,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:26:26,530 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40280, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:26:26,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6edf9c3f to 127.0.0.1:58133 2024-12-07T17:26:26,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:26:26,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-07T17:26:26,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-07T17:26:26,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=203, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-07T17:26:26,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-07T17:26:26,533 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T17:26:26,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=203 2024-12-07T17:26:26,534 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T17:26:26,536 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T17:26:26,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742329_1505 (size=203) 2024-12-07T17:26:26,553 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742329_1505 (size=203) 2024-12-07T17:26:26,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742329_1505 (size=203) 2024-12-07T17:26:26,554 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T17:26:26,554 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=204, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 14cffa573abf99bf822272c643bd41e1}, {pid=205, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 5a0752b327a26effa3328009df4dd771}] 2024-12-07T17:26:26,555 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=205, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 5a0752b327a26effa3328009df4dd771 2024-12-07T17:26:26,555 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=204, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 14cffa573abf99bf822272c643bd41e1 2024-12-07T17:26:26,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=203 2024-12-07T17:26:26,706 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5c70517e8267,36163,1733592165482 2024-12-07T17:26:26,706 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5c70517e8267,41313,1733592165267 2024-12-07T17:26:26,707 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36163 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=204 2024-12-07T17:26:26,707 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41313 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=205 2024-12-07T17:26:26,707 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733592385889.14cffa573abf99bf822272c643bd41e1. 2024-12-07T17:26:26,707 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733592385889.5a0752b327a26effa3328009df4dd771. 
2024-12-07T17:26:26,707 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2538): Flush status journal for 5a0752b327a26effa3328009df4dd771: 2024-12-07T17:26:26,707 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2538): Flush status journal for 14cffa573abf99bf822272c643bd41e1: 2024-12-07T17:26:26,707 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1733592385889.5a0752b327a26effa3328009df4dd771. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-07T17:26:26,707 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1733592385889.14cffa573abf99bf822272c643bd41e1. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-07T17:26:26,707 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1733592385889.5a0752b327a26effa3328009df4dd771.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:26,707 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1733592385889.14cffa573abf99bf822272c643bd41e1.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:26,707 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:26:26,707 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:26:26,707 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T17:26:26,707 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T17:26:26,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742330_1506 (size=82) 2024-12-07T17:26:26,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742331_1507 (size=82) 2024-12-07T17:26:26,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742331_1507 (size=82) 2024-12-07T17:26:26,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742330_1506 (size=82) 2024-12-07T17:26:26,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742331_1507 (size=82) 2024-12-07T17:26:26,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:33165 is added to blk_1073742330_1506 (size=82) 2024-12-07T17:26:26,715 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733592385889.5a0752b327a26effa3328009df4dd771. 2024-12-07T17:26:26,715 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733592385889.14cffa573abf99bf822272c643bd41e1. 2024-12-07T17:26:26,715 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=204 2024-12-07T17:26:26,715 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=205 2024-12-07T17:26:26,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=205 2024-12-07T17:26:26,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=204 2024-12-07T17:26:26,715 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 5a0752b327a26effa3328009df4dd771 2024-12-07T17:26:26,715 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 14cffa573abf99bf822272c643bd41e1 2024-12-07T17:26:26,715 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=204, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 14cffa573abf99bf822272c643bd41e1 2024-12-07T17:26:26,716 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=205, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 5a0752b327a26effa3328009df4dd771 2024-12-07T17:26:26,717 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=204, ppid=203, state=SUCCESS; SnapshotRegionProcedure 14cffa573abf99bf822272c643bd41e1 in 162 msec 2024-12-07T17:26:26,718 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=205, resume processing ppid=203 2024-12-07T17:26:26,718 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=205, ppid=203, state=SUCCESS; SnapshotRegionProcedure 5a0752b327a26effa3328009df4dd771 in 162 msec 2024-12-07T17:26:26,718 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T17:26:26,718 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute 
state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T17:26:26,719 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T17:26:26,719 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:26,719 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:26,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742332_1508 (size=585) 2024-12-07T17:26:26,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742332_1508 (size=585) 2024-12-07T17:26:26,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742332_1508 (size=585) 2024-12-07T17:26:26,731 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T17:26:26,734 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T17:26:26,734 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:26,735 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T17:26:26,735 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-07T17:26:26,736 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=203, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 203 msec 
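
The entries above show the master running SnapshotProcedure pid=203 for the FLUSH-type snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp from SNAPSHOT_PREPARE through SNAPSHOT_POST_OPERATION. For orientation, here is a minimal client-side sketch of how such a snapshot can be requested through the HBase Admin API; the configuration and connection handling are illustrative assumptions, not lifted from the test code:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        // Assumption: ZooKeeper quorum and the rest of the config come from the classpath,
        // as they would in the mini-cluster used by this test.
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // FLUSH-type snapshot, matching "type=FLUSH" in the request logged above.
          admin.snapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp",
              TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
              SnapshotType.FLUSH);
        }
      }
    }

Admin.snapshot blocks until the snapshot finishes; the repeated "Checking to see if procedure is done pid=203" entries and the later "HBaseAdmin$TableFuture ... procId: 203 completed" line are that client-side polling.
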
2024-12-07T17:26:26,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=203 2024-12-07T17:26:26,835 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 203 completed 2024-12-07T17:26:26,841 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36163 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1733592385889.14cffa573abf99bf822272c643bd41e1. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T17:26:26,843 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41313 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1733592385889.5a0752b327a26effa3328009df4dd771. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T17:26:26,847 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:26,847 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1733592385889.14cffa573abf99bf822272c643bd41e1. 2024-12-07T17:26:26,847 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T17:26:26,856 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-07T17:26:26,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733592386856 (current time:1733592386856). 
2024-12-07T17:26:26,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T17:26:26,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-07T17:26:26,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T17:26:26,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4ebf93de to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@22b86f65 2024-12-07T17:26:26,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34507bc9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:26:26,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:26:26,862 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42386, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:26:26,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4ebf93de to 127.0.0.1:58133 2024-12-07T17:26:26,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:26:26,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x16d430a7 to 127.0.0.1:58133 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@77f6a65f 2024-12-07T17:26:26,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@404f155b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T17:26:26,868 DEBUG [hconnection-0x5fa30826-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:26:26,869 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42390, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:26:26,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T17:26:26,871 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40296, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T17:26:26,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] zookeeper.ReadOnlyZKClient(407): Close 
zookeeper connection 0x16d430a7 to 127.0.0.1:58133 2024-12-07T17:26:26,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:26:26,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-07T17:26:26,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-07T17:26:26,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=206, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-07T17:26:26,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 206 2024-12-07T17:26:26,873 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T17:26:26,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206 2024-12-07T17:26:26,874 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T17:26:26,876 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T17:26:26,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742333_1509 (size=198) 2024-12-07T17:26:26,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742333_1509 (size=198) 2024-12-07T17:26:26,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742333_1509 (size=198) 2024-12-07T17:26:26,882 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T17:26:26,882 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=207, 
ppid=206, state=RUNNABLE; SnapshotRegionProcedure 14cffa573abf99bf822272c643bd41e1}, {pid=208, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 5a0752b327a26effa3328009df4dd771}] 2024-12-07T17:26:26,883 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=208, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 5a0752b327a26effa3328009df4dd771 2024-12-07T17:26:26,883 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=207, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 14cffa573abf99bf822272c643bd41e1 2024-12-07T17:26:26,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206 2024-12-07T17:26:27,034 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5c70517e8267,36163,1733592165482 2024-12-07T17:26:27,034 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5c70517e8267,41313,1733592165267 2024-12-07T17:26:27,034 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41313 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=208 2024-12-07T17:26:27,034 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36163 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=207 2024-12-07T17:26:27,035 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733592385889.5a0752b327a26effa3328009df4dd771. 2024-12-07T17:26:27,035 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733592385889.14cffa573abf99bf822272c643bd41e1. 
2024-12-07T17:26:27,035 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegion(2837): Flushing 5a0752b327a26effa3328009df4dd771 1/1 column families, dataSize=2.87 KB heapSize=6.44 KB 2024-12-07T17:26:27,035 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegion(2837): Flushing 14cffa573abf99bf822272c643bd41e1 1/1 column families, dataSize=400 B heapSize=1.09 KB 2024-12-07T17:26:27,054 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithSkipTmp/5a0752b327a26effa3328009df4dd771/.tmp/cf/5e6b009b50c54b8c80d75da0c9dcf319 is 71, key is 16491dcadffc71202f878b6a74e5a8d7/cf:q/1733592386843/Put/seqid=0 2024-12-07T17:26:27,058 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithSkipTmp/14cffa573abf99bf822272c643bd41e1/.tmp/cf/9563ffcc6747405187846b395ec3d480 is 71, key is 0216544db94dfc4274b8e8d1ab7b52f6/cf:q/1733592386841/Put/seqid=0 2024-12-07T17:26:27,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742334_1510 (size=8122) 2024-12-07T17:26:27,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742334_1510 (size=8122) 2024-12-07T17:26:27,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742334_1510 (size=8122) 2024-12-07T17:26:27,061 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.87 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithSkipTmp/5a0752b327a26effa3328009df4dd771/.tmp/cf/5e6b009b50c54b8c80d75da0c9dcf319 2024-12-07T17:26:27,067 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithSkipTmp/5a0752b327a26effa3328009df4dd771/.tmp/cf/5e6b009b50c54b8c80d75da0c9dcf319 as hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithSkipTmp/5a0752b327a26effa3328009df4dd771/cf/5e6b009b50c54b8c80d75da0c9dcf319 2024-12-07T17:26:27,073 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithSkipTmp/5a0752b327a26effa3328009df4dd771/cf/5e6b009b50c54b8c80d75da0c9dcf319, entries=44, sequenceid=6, filesize=7.9 K 2024-12-07T17:26:27,074 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 
{event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegion(3040): Finished flush of dataSize ~2.87 KB/2936, heapSize ~6.42 KB/6576, currentSize=0 B/0 for 5a0752b327a26effa3328009df4dd771 in 39ms, sequenceid=6, compaction requested=false 2024-12-07T17:26:27,074 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-12-07T17:26:27,075 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegion(2538): Flush status journal for 5a0752b327a26effa3328009df4dd771: 2024-12-07T17:26:27,075 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1733592385889.5a0752b327a26effa3328009df4dd771. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-07T17:26:27,075 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1733592385889.5a0752b327a26effa3328009df4dd771.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:27,075 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:26:27,075 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithSkipTmp/5a0752b327a26effa3328009df4dd771/cf/5e6b009b50c54b8c80d75da0c9dcf319] hfiles 2024-12-07T17:26:27,075 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithSkipTmp/5a0752b327a26effa3328009df4dd771/cf/5e6b009b50c54b8c80d75da0c9dcf319 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:27,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742335_1511 (size=5490) 2024-12-07T17:26:27,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742335_1511 (size=5490) 2024-12-07T17:26:27,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742335_1511 (size=5490) 2024-12-07T17:26:27,092 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=400 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithSkipTmp/14cffa573abf99bf822272c643bd41e1/.tmp/cf/9563ffcc6747405187846b395ec3d480 2024-12-07T17:26:27,098 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithSkipTmp/14cffa573abf99bf822272c643bd41e1/.tmp/cf/9563ffcc6747405187846b395ec3d480 as hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithSkipTmp/14cffa573abf99bf822272c643bd41e1/cf/9563ffcc6747405187846b395ec3d480 2024-12-07T17:26:27,103 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithSkipTmp/14cffa573abf99bf822272c643bd41e1/cf/9563ffcc6747405187846b395ec3d480, entries=6, sequenceid=6, filesize=5.4 K 2024-12-07T17:26:27,104 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegion(3040): Finished flush of dataSize ~400 B/400, heapSize ~1.08 KB/1104, currentSize=0 B/0 for 14cffa573abf99bf822272c643bd41e1 in 69ms, sequenceid=6, compaction requested=false 2024-12-07T17:26:27,104 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegion(2538): Flush status journal for 14cffa573abf99bf822272c643bd41e1: 2024-12-07T17:26:27,104 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1733592385889.14cffa573abf99bf822272c643bd41e1. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-07T17:26:27,104 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1733592385889.14cffa573abf99bf822272c643bd41e1.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:27,104 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T17:26:27,104 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithSkipTmp/14cffa573abf99bf822272c643bd41e1/cf/9563ffcc6747405187846b395ec3d480] hfiles 2024-12-07T17:26:27,104 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithSkipTmp/14cffa573abf99bf822272c643bd41e1/cf/9563ffcc6747405187846b395ec3d480 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:27,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742336_1512 (size=121) 2024-12-07T17:26:27,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742336_1512 (size=121) 2024-12-07T17:26:27,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742336_1512 (size=121) 2024-12-07T17:26:27,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733592385889.5a0752b327a26effa3328009df4dd771. 
2024-12-07T17:26:27,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=208 2024-12-07T17:26:27,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=208 2024-12-07T17:26:27,115 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 5a0752b327a26effa3328009df4dd771 2024-12-07T17:26:27,115 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=208, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 5a0752b327a26effa3328009df4dd771 2024-12-07T17:26:27,120 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=208, ppid=206, state=SUCCESS; SnapshotRegionProcedure 5a0752b327a26effa3328009df4dd771 in 237 msec 2024-12-07T17:26:27,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742337_1513 (size=121) 2024-12-07T17:26:27,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742337_1513 (size=121) 2024-12-07T17:26:27,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742337_1513 (size=121) 2024-12-07T17:26:27,128 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733592385889.14cffa573abf99bf822272c643bd41e1. 
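
In the flush entries above, each SnapshotRegionCallable flushes the region's memstore into a new HFile under the region's .tmp/cf directory, commits it into cf/, and then records it as a reference in the snapshot manifest. The same region-level flush can also be requested explicitly through the Admin API; a minimal sketch, with everything but the table name assumed:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Flush all regions of the table so memstore contents land in HFiles,
          // analogous to the per-region flush the FLUSH snapshot performs implicitly above.
          admin.flush(TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"));
        }
      }
    }
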
2024-12-07T17:26:27,128 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/5c70517e8267:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=207 2024-12-07T17:26:27,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster(4106): Remote procedure done, pid=207 2024-12-07T17:26:27,129 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 14cffa573abf99bf822272c643bd41e1 2024-12-07T17:26:27,129 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=207, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 14cffa573abf99bf822272c643bd41e1 2024-12-07T17:26:27,131 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=207, resume processing ppid=206 2024-12-07T17:26:27,131 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=207, ppid=206, state=SUCCESS; SnapshotRegionProcedure 14cffa573abf99bf822272c643bd41e1 in 248 msec 2024-12-07T17:26:27,131 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T17:26:27,132 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T17:26:27,132 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T17:26:27,132 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:27,133 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:27,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742338_1514 (size=663) 2024-12-07T17:26:27,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742338_1514 (size=663) 2024-12-07T17:26:27,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742338_1514 (size=663) 2024-12-07T17:26:27,145 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp 
type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T17:26:27,149 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T17:26:27,150 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:27,151 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T17:26:27,151 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 206 2024-12-07T17:26:27,151 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=206, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 279 msec 2024-12-07T17:26:27,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206 2024-12-07T17:26:27,175 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 206 completed 2024-12-07T17:26:27,175 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592387175 2024-12-07T17:26:27,176 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:33765, tgtDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592387175, rawTgtDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592387175, srcFsUri=hdfs://localhost:33765, srcDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:26:27,207 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:33765, inputRoot=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc 2024-12-07T17:26:27,207 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1614647307_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592387175, skipTmp=true, 
initialOutputSnapshotDir=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592387175/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:27,209 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-07T17:26:27,213 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592387175/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:27,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742339_1515 (size=198) 2024-12-07T17:26:27,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742339_1515 (size=198) 2024-12-07T17:26:27,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742339_1515 (size=198) 2024-12-07T17:26:27,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742340_1516 (size=663) 2024-12-07T17:26:27,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742340_1516 (size=663) 2024-12-07T17:26:27,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742340_1516 (size=663) 2024-12-07T17:26:27,241 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-07T17:26:27,241 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-07T17:26:27,242 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-07T17:26:27,242 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-07T17:26:28,482 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop-1096526678194623595.jar 2024-12-07T17:26:28,482 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 
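
The ExportSnapshot entries above record the tool verifying the source snapshot, copying its manifest to the export destination with skipTmp=true, and staging the dependency jars that TableMapReduceUtil resolves in the surrounding entries for the MapReduce copy job. A minimal sketch of driving the same export programmatically; the -snapshot/-copy-to options follow the HBase reference guide, while the "snapshot.export.skip.tmp" property name is an assumption inferred from the skipTmp=true entry, so verify it against the ExportSnapshot version in use:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumption: property behind "skipTmp=true" in the log; check the exact name
        // for the ExportSnapshot build you are running.
        conf.setBoolean("snapshot.export.skip.tmp", true);
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemStateWithSkipTmp",
            "-copy-to", "hdfs://localhost:33765/user/jenkins/test-data/"
                + "5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592387175"
        });
        System.exit(rc);
      }
    }
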
2024-12-07T17:26:28,482 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-07T17:26:28,553 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop-10868613912175599398.jar 2024-12-07T17:26:28,553 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-07T17:26:28,553 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-07T17:26:28,553 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-07T17:26:28,554 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-07T17:26:28,554 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-07T17:26:28,554 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-07T17:26:28,554 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-07T17:26:28,554 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-07T17:26:28,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-07T17:26:28,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-07T17:26:28,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-07T17:26:28,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-07T17:26:28,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-07T17:26:28,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-07T17:26:28,556 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-07T17:26:28,556 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-07T17:26:28,556 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-07T17:26:28,556 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-07T17:26:28,557 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:26:28,557 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:26:28,557 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T17:26:28,557 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:26:28,557 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T17:26:28,558 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T17:26:28,558 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T17:26:28,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742341_1517 (size=127628) 2024-12-07T17:26:28,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742341_1517 (size=127628) 2024-12-07T17:26:28,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742341_1517 (size=127628) 2024-12-07T17:26:28,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742342_1518 (size=2172101) 2024-12-07T17:26:28,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742342_1518 (size=2172101) 2024-12-07T17:26:28,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742342_1518 (size=2172101) 2024-12-07T17:26:28,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742343_1519 (size=213228) 2024-12-07T17:26:28,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742343_1519 (size=213228) 2024-12-07T17:26:28,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742343_1519 (size=213228) 2024-12-07T17:26:28,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742344_1520 (size=1877034) 2024-12-07T17:26:28,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742344_1520 (size=1877034) 2024-12-07T17:26:28,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742344_1520 (size=1877034) 2024-12-07T17:26:28,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742345_1521 (size=533455) 2024-12-07T17:26:28,746 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742345_1521 (size=533455) 2024-12-07T17:26:28,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742345_1521 (size=533455) 2024-12-07T17:26:28,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742346_1522 (size=7280644) 2024-12-07T17:26:28,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742346_1522 (size=7280644) 2024-12-07T17:26:28,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742346_1522 (size=7280644) 2024-12-07T17:26:28,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742347_1523 (size=4188619) 2024-12-07T17:26:28,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742347_1523 (size=4188619) 2024-12-07T17:26:28,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742347_1523 (size=4188619) 2024-12-07T17:26:28,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742348_1524 (size=20406) 2024-12-07T17:26:28,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742348_1524 (size=20406) 2024-12-07T17:26:28,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742348_1524 (size=20406) 2024-12-07T17:26:28,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742349_1525 (size=75495) 2024-12-07T17:26:28,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742349_1525 (size=75495) 2024-12-07T17:26:28,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742349_1525 (size=75495) 2024-12-07T17:26:28,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742350_1526 (size=6350154) 2024-12-07T17:26:28,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742350_1526 (size=6350154) 2024-12-07T17:26:28,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742350_1526 (size=6350154) 2024-12-07T17:26:28,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742351_1527 (size=45609) 2024-12-07T17:26:28,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742351_1527 (size=45609) 2024-12-07T17:26:28,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742351_1527 (size=45609) 2024-12-07T17:26:28,977 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742352_1528 (size=110084) 2024-12-07T17:26:28,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742352_1528 (size=110084) 2024-12-07T17:26:28,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742352_1528 (size=110084) 2024-12-07T17:26:28,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742353_1529 (size=1323991) 2024-12-07T17:26:28,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742353_1529 (size=1323991) 2024-12-07T17:26:28,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742353_1529 (size=1323991) 2024-12-07T17:26:28,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742354_1530 (size=451756) 2024-12-07T17:26:28,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742354_1530 (size=451756) 2024-12-07T17:26:28,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742354_1530 (size=451756) 2024-12-07T17:26:29,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742355_1531 (size=23076) 2024-12-07T17:26:29,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742355_1531 (size=23076) 2024-12-07T17:26:29,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742355_1531 (size=23076) 2024-12-07T17:26:29,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742356_1532 (size=126803) 2024-12-07T17:26:29,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742356_1532 (size=126803) 2024-12-07T17:26:29,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742356_1532 (size=126803) 2024-12-07T17:26:29,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742357_1533 (size=322274) 2024-12-07T17:26:29,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742357_1533 (size=322274) 2024-12-07T17:26:29,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742357_1533 (size=322274) 2024-12-07T17:26:29,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742358_1534 (size=1832290) 2024-12-07T17:26:29,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742358_1534 (size=1832290) 2024-12-07T17:26:29,036 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742358_1534 (size=1832290) 2024-12-07T17:26:29,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742359_1535 (size=30081) 2024-12-07T17:26:29,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742359_1535 (size=30081) 2024-12-07T17:26:29,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742359_1535 (size=30081) 2024-12-07T17:26:29,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742360_1536 (size=53616) 2024-12-07T17:26:29,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742360_1536 (size=53616) 2024-12-07T17:26:29,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742360_1536 (size=53616) 2024-12-07T17:26:29,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742361_1537 (size=29229) 2024-12-07T17:26:29,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742361_1537 (size=29229) 2024-12-07T17:26:29,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742361_1537 (size=29229) 2024-12-07T17:26:29,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742362_1538 (size=169089) 2024-12-07T17:26:29,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742362_1538 (size=169089) 2024-12-07T17:26:29,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742362_1538 (size=169089) 2024-12-07T17:26:29,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742363_1539 (size=5175431) 2024-12-07T17:26:29,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742363_1539 (size=5175431) 2024-12-07T17:26:29,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742363_1539 (size=5175431) 2024-12-07T17:26:29,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742364_1540 (size=136454) 2024-12-07T17:26:29,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742364_1540 (size=136454) 2024-12-07T17:26:29,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742364_1540 (size=136454) 2024-12-07T17:26:29,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742365_1541 (size=907853) 2024-12-07T17:26:29,119 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742365_1541 (size=907853) 2024-12-07T17:26:29,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742365_1541 (size=907853) 2024-12-07T17:26:29,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742366_1542 (size=3317408) 2024-12-07T17:26:29,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742366_1542 (size=3317408) 2024-12-07T17:26:29,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742366_1542 (size=3317408) 2024-12-07T17:26:29,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742367_1543 (size=503880) 2024-12-07T17:26:29,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742367_1543 (size=503880) 2024-12-07T17:26:29,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742367_1543 (size=503880) 2024-12-07T17:26:29,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742368_1544 (size=4695811) 2024-12-07T17:26:29,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742368_1544 (size=4695811) 2024-12-07T17:26:29,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742368_1544 (size=4695811) 2024-12-07T17:26:29,201 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
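The jar listing above is TableMapReduceUtil resolving which artifacts to ship with the MapReduce job, and the JobResourceUploader warning appears because no job jar was configured. As a rough illustration only (not the test's actual driver; the class and job name below are hypothetical), a job is normally set up so that both concerns are handled:

// Minimal sketch: configure an MR job so dependency jars are shipped
// (what TableMapReduceUtil(923) resolves above) and a job jar is set
// (what the "No job jar file set" warning refers to).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class JarSetupSketch {
  public static Job newJob() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "export-sketch");   // hypothetical job name
    job.setJarByClass(JarSetupSketch.class);             // avoids the job-jar warning
    TableMapReduceUtil.addDependencyJars(job);           // resolves jars as logged above
    return job;
  }
}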
2024-12-07T17:26:29,203 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list 2024-12-07T17:26:29,205 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-07T17:26:29,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742369_1545 (size=366) 2024-12-07T17:26:29,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742369_1545 (size=366) 2024-12-07T17:26:29,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742369_1545 (size=366) 2024-12-07T17:26:29,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742370_1546 (size=15) 2024-12-07T17:26:29,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742370_1546 (size=15) 2024-12-07T17:26:29,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742370_1546 (size=15) 2024-12-07T17:26:29,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742371_1547 (size=305055) 2024-12-07T17:26:29,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742371_1547 (size=305055) 2024-12-07T17:26:29,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742371_1547 (size=305055) 2024-12-07T17:26:29,845 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-07T17:26:29,845 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-07T17:26:29,848 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0009_000001 (auth:SIMPLE) from 127.0.0.1:48356 2024-12-07T17:26:29,869 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-1_0/usercache/jenkins/appcache/application_1733592173099_0009/container_1733592173099_0009_01_000001/launch_container.sh] 2024-12-07T17:26:29,869 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-1_0/usercache/jenkins/appcache/application_1733592173099_0009/container_1733592173099_0009_01_000001/container_tokens] 2024-12-07T17:26:29,869 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-1_0/usercache/jenkins/appcache/application_1733592173099_0009/container_1733592173099_0009_01_000001/sysfs] 2024-12-07T17:26:30,690 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0010_000001 (auth:SIMPLE) from 127.0.0.1:46412 2024-12-07T17:26:31,133 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T17:26:34,860 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:34,860 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-12-07T17:26:34,861 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-07T17:26:36,069 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0010_000001 (auth:SIMPLE) from 127.0.0.1:60536 2024-12-07T17:26:36,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742372_1548 (size=350753) 2024-12-07T17:26:36,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742372_1548 (size=350753) 2024-12-07T17:26:36,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742372_1548 (size=350753) 
2024-12-07T17:26:38,345 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0010_000001 (auth:SIMPLE) from 127.0.0.1:49086 2024-12-07T17:26:40,364 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T17:26:41,103 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 0138678f1a360d7573c5cfe5f7d0a7e0, had cached 0 bytes from a total of 8394 2024-12-07T17:26:41,103 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 95a53dd0f0fa93ab4e6bebd2e0c2e9c5, had cached 0 bytes from a total of 5216 2024-12-07T17:26:42,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742373_1549 (size=8122) 2024-12-07T17:26:42,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742373_1549 (size=8122) 2024-12-07T17:26:42,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742373_1549 (size=8122) 2024-12-07T17:26:42,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742374_1550 (size=5490) 2024-12-07T17:26:42,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742374_1550 (size=5490) 2024-12-07T17:26:42,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742374_1550 (size=5490) 2024-12-07T17:26:42,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742375_1551 (size=17455) 2024-12-07T17:26:42,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742375_1551 (size=17455) 2024-12-07T17:26:42,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742375_1551 (size=17455) 2024-12-07T17:26:42,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742376_1552 (size=476) 2024-12-07T17:26:42,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742376_1552 (size=476) 2024-12-07T17:26:42,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742376_1552 (size=476) 2024-12-07T17:26:42,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742377_1553 (size=17455) 2024-12-07T17:26:42,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742377_1553 (size=17455) 2024-12-07T17:26:42,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742377_1553 (size=17455) 2024-12-07T17:26:42,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 
is added to blk_1073742378_1554 (size=350753) 2024-12-07T17:26:42,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742378_1554 (size=350753) 2024-12-07T17:26:42,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742378_1554 (size=350753) 2024-12-07T17:26:42,586 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0010_000001 (auth:SIMPLE) from 127.0.0.1:49092 2024-12-07T17:26:43,251 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-07T17:26:44,427 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-07T17:26:44,427 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-07T17:26:44,433 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:44,433 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-07T17:26:44,434 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-07T17:26:44,434 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1614647307_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:44,434 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-07T17:26:44,434 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-07T17:26:44,434 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1614647307_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592387175/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592387175/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:44,435 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592387175/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-07T17:26:44,435 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592387175/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 
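The "Finalize the Snapshot Export" / "Export Completed" lines above correspond to an ExportSnapshot run followed by listing the destination's .snapshotinfo and data.manifest. A hedged sketch of an equivalent invocation follows; the mapper count is a placeholder, and the skip-tmp behaviour is assumed to be driven by the snapshot.export.skip.tmp property rather than taken from this log:

// Sketch only: drive ExportSnapshot the way the output above suggests.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumed property name for the "SkipTmp" variant of the export test.
    conf.setBoolean("snapshot.export.skip.tmp", true);
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithSkipTmp",
        "-copy-to", "hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/export-test/export-1733592387175",
        "-mappers", "2"   // placeholder value
    });
    System.exit(rc);
  }
}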
2024-12-07T17:26:44,440 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:44,440 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:44,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=209, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:44,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=209 2024-12-07T17:26:44,443 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592404443"}]},"ts":"1733592404443"} 2024-12-07T17:26:44,444 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta 2024-12-07T17:26:44,446 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-12-07T17:26:44,446 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=210, ppid=209, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-12-07T17:26:44,448 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=211, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=14cffa573abf99bf822272c643bd41e1, UNASSIGN}, {pid=212, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=5a0752b327a26effa3328009df4dd771, UNASSIGN}] 2024-12-07T17:26:44,448 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=211, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=14cffa573abf99bf822272c643bd41e1, UNASSIGN 2024-12-07T17:26:44,448 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=212, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=5a0752b327a26effa3328009df4dd771, UNASSIGN 2024-12-07T17:26:44,449 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=211 updating hbase:meta row=14cffa573abf99bf822272c643bd41e1, regionState=CLOSING, regionLocation=5c70517e8267,36163,1733592165482 2024-12-07T17:26:44,449 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=212 updating hbase:meta row=5a0752b327a26effa3328009df4dd771, regionState=CLOSING, regionLocation=5c70517e8267,41313,1733592165267 2024-12-07T17:26:44,450 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T17:26:44,450 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=213, ppid=211, state=RUNNABLE; CloseRegionProcedure 14cffa573abf99bf822272c643bd41e1, server=5c70517e8267,36163,1733592165482}] 2024-12-07T17:26:44,450 DEBUG 
[PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T17:26:44,450 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=214, ppid=212, state=RUNNABLE; CloseRegionProcedure 5a0752b327a26effa3328009df4dd771, server=5c70517e8267,41313,1733592165267}] 2024-12-07T17:26:44,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=209 2024-12-07T17:26:44,602 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5c70517e8267,41313,1733592165267 2024-12-07T17:26:44,602 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5c70517e8267,36163,1733592165482 2024-12-07T17:26:44,602 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] handler.UnassignRegionHandler(124): Close 14cffa573abf99bf822272c643bd41e1 2024-12-07T17:26:44,602 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] handler.UnassignRegionHandler(124): Close 5a0752b327a26effa3328009df4dd771 2024-12-07T17:26:44,602 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T17:26:44,602 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T17:26:44,602 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1681): Closing 14cffa573abf99bf822272c643bd41e1, disabling compactions & flushes 2024-12-07T17:26:44,602 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1681): Closing 5a0752b327a26effa3328009df4dd771, disabling compactions & flushes 2024-12-07T17:26:44,602 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1733592385889.14cffa573abf99bf822272c643bd41e1. 2024-12-07T17:26:44,602 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1733592385889.5a0752b327a26effa3328009df4dd771. 2024-12-07T17:26:44,602 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733592385889.14cffa573abf99bf822272c643bd41e1. 2024-12-07T17:26:44,602 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733592385889.14cffa573abf99bf822272c643bd41e1. after waiting 0 ms 2024-12-07T17:26:44,602 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733592385889.5a0752b327a26effa3328009df4dd771. 
2024-12-07T17:26:44,602 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1733592385889.14cffa573abf99bf822272c643bd41e1. 2024-12-07T17:26:44,603 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733592385889.5a0752b327a26effa3328009df4dd771. after waiting 0 ms 2024-12-07T17:26:44,603 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1733592385889.5a0752b327a26effa3328009df4dd771. 2024-12-07T17:26:44,607 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithSkipTmp/5a0752b327a26effa3328009df4dd771/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T17:26:44,607 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithSkipTmp/14cffa573abf99bf822272c643bd41e1/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T17:26:44,607 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:26:44,607 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:26:44,607 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1733592385889.5a0752b327a26effa3328009df4dd771. 2024-12-07T17:26:44,607 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1635): Region close journal for 5a0752b327a26effa3328009df4dd771: 2024-12-07T17:26:44,607 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,,1733592385889.14cffa573abf99bf822272c643bd41e1. 
2024-12-07T17:26:44,607 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1635): Region close journal for 14cffa573abf99bf822272c643bd41e1: 2024-12-07T17:26:44,608 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] handler.UnassignRegionHandler(170): Closed 5a0752b327a26effa3328009df4dd771 2024-12-07T17:26:44,609 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=212 updating hbase:meta row=5a0752b327a26effa3328009df4dd771, regionState=CLOSED 2024-12-07T17:26:44,609 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] handler.UnassignRegionHandler(170): Closed 14cffa573abf99bf822272c643bd41e1 2024-12-07T17:26:44,609 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=211 updating hbase:meta row=14cffa573abf99bf822272c643bd41e1, regionState=CLOSED 2024-12-07T17:26:44,611 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=214, resume processing ppid=212 2024-12-07T17:26:44,611 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=214, ppid=212, state=SUCCESS; CloseRegionProcedure 5a0752b327a26effa3328009df4dd771, server=5c70517e8267,41313,1733592165267 in 160 msec 2024-12-07T17:26:44,612 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=213, resume processing ppid=211 2024-12-07T17:26:44,612 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=213, ppid=211, state=SUCCESS; CloseRegionProcedure 14cffa573abf99bf822272c643bd41e1, server=5c70517e8267,36163,1733592165482 in 160 msec 2024-12-07T17:26:44,612 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=212, ppid=210, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=5a0752b327a26effa3328009df4dd771, UNASSIGN in 163 msec 2024-12-07T17:26:44,613 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=211, resume processing ppid=210 2024-12-07T17:26:44,613 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=211, ppid=210, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=14cffa573abf99bf822272c643bd41e1, UNASSIGN in 164 msec 2024-12-07T17:26:44,614 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=210, resume processing ppid=209 2024-12-07T17:26:44,614 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=210, ppid=209, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 167 msec 2024-12-07T17:26:44,615 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733592404615"}]},"ts":"1733592404615"} 2024-12-07T17:26:44,616 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-12-07T17:26:44,618 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-12-07T17:26:44,619 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=209, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 178 msec 2024-12-07T17:26:44,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=209 2024-12-07T17:26:44,744 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 209 completed 2024-12-07T17:26:44,745 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:44,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] procedure2.ProcedureExecutor(1098): Stored pid=215, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:44,746 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=215, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:44,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:44,746 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=215, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:44,748 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36163 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:44,749 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithSkipTmp/14cffa573abf99bf822272c643bd41e1 2024-12-07T17:26:44,749 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithSkipTmp/5a0752b327a26effa3328009df4dd771 2024-12-07T17:26:44,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:44,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:44,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:44,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:44,751 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithSkipTmp/14cffa573abf99bf822272c643bd41e1/cf, FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithSkipTmp/14cffa573abf99bf822272c643bd41e1/recovered.edits] 2024-12-07T17:26:44,751 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithSkipTmp/5a0752b327a26effa3328009df4dd771/cf, FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithSkipTmp/5a0752b327a26effa3328009df4dd771/recovered.edits] 2024-12-07T17:26:44,752 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-07T17:26:44,752 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-07T17:26:44,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:44,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:26:44,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:26:44,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:44,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:26:44,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T17:26:44,753 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data null 2024-12-07T17:26:44,753 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data null 2024-12-07T17:26:44,753 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-07T17:26:44,753 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 
2024-12-07T17:26:44,754 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:26:44,754 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:26:44,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=215 2024-12-07T17:26:44,754 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:26:44,754 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T17:26:44,755 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithSkipTmp/14cffa573abf99bf822272c643bd41e1/cf/9563ffcc6747405187846b395ec3d480 to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/14cffa573abf99bf822272c643bd41e1/cf/9563ffcc6747405187846b395ec3d480 2024-12-07T17:26:44,755 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithSkipTmp/5a0752b327a26effa3328009df4dd771/cf/5e6b009b50c54b8c80d75da0c9dcf319 to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/5a0752b327a26effa3328009df4dd771/cf/5e6b009b50c54b8c80d75da0c9dcf319 2024-12-07T17:26:44,757 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithSkipTmp/14cffa573abf99bf822272c643bd41e1/recovered.edits/9.seqid to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/14cffa573abf99bf822272c643bd41e1/recovered.edits/9.seqid 2024-12-07T17:26:44,757 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithSkipTmp/5a0752b327a26effa3328009df4dd771/recovered.edits/9.seqid to hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/5a0752b327a26effa3328009df4dd771/recovered.edits/9.seqid 2024-12-07T17:26:44,758 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithSkipTmp/14cffa573abf99bf822272c643bd41e1 2024-12-07T17:26:44,758 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testtb-testExportFileSystemStateWithSkipTmp/5a0752b327a26effa3328009df4dd771 2024-12-07T17:26:44,758 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-12-07T17:26:44,759 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=215, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:44,761 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-12-07T17:26:44,763 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 2024-12-07T17:26:44,764 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=215, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:44,764 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 2024-12-07T17:26:44,764 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1733592385889.14cffa573abf99bf822272c643bd41e1.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733592404764"}]},"ts":"9223372036854775807"} 2024-12-07T17:26:44,764 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1733592385889.5a0752b327a26effa3328009df4dd771.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733592404764"}]},"ts":"9223372036854775807"} 2024-12-07T17:26:44,765 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-07T17:26:44,765 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 14cffa573abf99bf822272c643bd41e1, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733592385889.14cffa573abf99bf822272c643bd41e1.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 5a0752b327a26effa3328009df4dd771, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733592385889.5a0752b327a26effa3328009df4dd771.', STARTKEY => '1', ENDKEY => ''}] 2024-12-07T17:26:44,766 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 
2024-12-07T17:26:44,766 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733592404766"}]},"ts":"9223372036854775807"} 2024-12-07T17:26:44,767 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-12-07T17:26:44,769 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=215, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:44,769 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=215, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 24 msec 2024-12-07T17:26:44,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=215 2024-12-07T17:26:44,855 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 215 completed 2024-12-07T17:26:44,860 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:44,861 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" 2024-12-07T17:26:44,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:44,863 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" 2024-12-07T17:26:44,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T17:26:44,887 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=797 (was 798), OpenFileDescriptor=790 (was 801), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=447 (was 421) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 17), AvailableMemoryMB=3804 (was 3932) 2024-12-07T17:26:44,887 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=797 is superior to 500 2024-12-07T17:26:44,887 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2861): Stopping mini mapreduce cluster... 
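The teardown recorded above (disable and delete of testtb-testExportFileSystemStateWithSkipTmp, then removal of both snapshots) maps onto the standard Admin API. A minimal sketch, assuming an already-open Connection named conn obtained via ConnectionFactory.createConnection(conf):

// Client-side view of the cleanup the master log above records.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class TestTeardownSketch {
  static void cleanup(Connection conn) throws java.io.IOException {
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    try (Admin admin = conn.getAdmin()) {
      admin.disableTable(table);   // DisableTableProcedure, pid=209 above
      admin.deleteTable(table);    // DeleteTableProcedure, pid=215 above
      admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp");
      admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithSkipTmp");
    }
  }
}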
2024-12-07T17:26:44,896 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@e35c669{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-07T17:26:44,898 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7cae447d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T17:26:44,898 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T17:26:44,899 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d7dfc02{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-07T17:26:44,899 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@27b49aa2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop.log.dir/,STOPPED} 2024-12-07T17:26:47,614 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_0/usercache/jenkins/appcache/application_1733592173099_0010/container_1733592173099_0010_01_000002/launch_container.sh] 2024-12-07T17:26:47,614 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_0/usercache/jenkins/appcache/application_1733592173099_0010/container_1733592173099_0010_01_000002/container_tokens] 2024-12-07T17:26:47,614 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_0/usercache/jenkins/appcache/application_1733592173099_0010/container_1733592173099_0010_01_000002/sysfs] 2024-12-07T17:26:48,660 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733592173099_0010_000001 (auth:SIMPLE) from 127.0.0.1:40690 2024-12-07T17:26:48,671 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_3/usercache/jenkins/appcache/application_1733592173099_0010/container_1733592173099_0010_01_000001/launch_container.sh] 2024-12-07T17:26:48,671 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_3/usercache/jenkins/appcache/application_1733592173099_0010/container_1733592173099_0010_01_000001/container_tokens] 2024-12-07T17:26:48,671 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/MiniMRCluster_1320144480/yarn-1837167634/MiniMRCluster_1320144480-localDir-nm-0_3/usercache/jenkins/appcache/application_1733592173099_0010/container_1733592173099_0010_01_000001/sysfs] 2024-12-07T17:26:50,109 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T17:27:01,911 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@573bf81a{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-07T17:27:01,911 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5f654859{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T17:27:01,911 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T17:27:01,911 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5423671f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-07T17:27:01,911 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2a76a006{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop.log.dir/,STOPPED} 2024-12-07T17:27:13,251 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-07T17:27:18,919 ERROR [Thread[Thread-406,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-07T17:27:18,920 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@16715c05{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-07T17:27:18,920 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@785080a6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T17:27:18,920 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T17:27:18,921 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ef1f8fe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-07T17:27:18,921 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1fe36ded{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop.log.dir/,STOPPED} 2024-12-07T17:27:18,925 WARN [ApplicationMaster Launcher {}] amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 2024-12-07T17:27:18,931 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-12-07T17:27:18,931 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-12-07T17:27:18,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741830_1006 (size=947297) 2024-12-07T17:27:18,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741830_1006 (size=947297) 2024-12-07T17:27:18,936 ERROR [Thread[Thread-433,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-07T17:27:18,940 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@a1c4462{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-07T17:27:18,940 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@ae48202{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T17:27:18,941 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T17:27:18,941 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@4e8b35f5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-07T17:27:18,941 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@20d16c0e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop.log.dir/,STOPPED} 2024-12-07T17:27:18,942 ERROR [Thread[Thread-384,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-07T17:27:18,942 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2864): Mini mapreduce cluster stopped 2024-12-07T17:27:18,942 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-07T17:27:18,943 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-07T17:27:18,943 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x112a7c4c to 127.0.0.1:58133 2024-12-07T17:27:18,943 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:27:18,943 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-07T17:27:18,943 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1225663917, stopped=false 2024-12-07T17:27:18,944 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:27:18,944 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-07T17:27:18,944 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=5c70517e8267,36895,1733592164285 2024-12-07T17:27:18,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T17:27:18,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T17:27:18,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T17:27:18,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:27:18,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:27:18,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper 
Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:27:18,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T17:27:18,946 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-07T17:27:18,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:27:18,946 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:27:18,946 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '5c70517e8267,41313,1733592165267' ***** 2024-12-07T17:27:18,946 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:27:18,946 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-07T17:27:18,946 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T17:27:18,946 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '5c70517e8267,38423,1733592165361' ***** 2024-12-07T17:27:18,946 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:27:18,946 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-07T17:27:18,947 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '5c70517e8267,36163,1733592165482' ***** 2024-12-07T17:27:18,947 INFO [RS:0;5c70517e8267:41313 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T17:27:18,947 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:27:18,947 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-07T17:27:18,947 INFO [RS:1;5c70517e8267:38423 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T17:27:18,947 INFO [RS:0;5c70517e8267:41313 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T17:27:18,947 INFO [RS:1;5c70517e8267:38423 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T17:27:18,947 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T17:27:18,947 INFO [RS:1;5c70517e8267:38423 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T17:27:18,947 INFO [RS:0;5c70517e8267:41313 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-07T17:27:18,947 INFO [RS:2;5c70517e8267:36163 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T17:27:18,947 INFO [RS:1;5c70517e8267:38423 {}] regionserver.HRegionServer(1224): stopping server 5c70517e8267,38423,1733592165361 2024-12-07T17:27:18,947 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T17:27:18,947 INFO [RS:2;5c70517e8267:36163 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T17:27:18,947 DEBUG [RS:1;5c70517e8267:38423 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:27:18,947 INFO [RS:2;5c70517e8267:36163 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T17:27:18,947 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-07T17:27:18,947 INFO [RS:0;5c70517e8267:41313 {}] regionserver.HRegionServer(3579): Received CLOSE for 0138678f1a360d7573c5cfe5f7d0a7e0 2024-12-07T17:27:18,947 INFO [RS:2;5c70517e8267:36163 {}] regionserver.HRegionServer(3579): Received CLOSE for 27a2410ab2d9b432daa26ec0ea504b68 2024-12-07T17:27:18,947 INFO [RS:1;5c70517e8267:38423 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T17:27:18,947 INFO [RS:1;5c70517e8267:38423 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T17:27:18,947 INFO [RS:1;5c70517e8267:38423 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T17:27:18,947 INFO [RS:1;5c70517e8267:38423 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-07T17:27:18,947 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-07T17:27:18,948 INFO [RS:0;5c70517e8267:41313 {}] regionserver.HRegionServer(1224): stopping server 5c70517e8267,41313,1733592165267 2024-12-07T17:27:18,948 INFO [RS:2;5c70517e8267:36163 {}] regionserver.HRegionServer(3579): Received CLOSE for 6860b2d9133d857f1acf6c9fb2cd82f1 2024-12-07T17:27:18,948 DEBUG [RS:0;5c70517e8267:41313 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:27:18,948 INFO [RS:2;5c70517e8267:36163 {}] regionserver.HRegionServer(3579): Received CLOSE for 95a53dd0f0fa93ab4e6bebd2e0c2e9c5 2024-12-07T17:27:18,948 INFO [RS:2;5c70517e8267:36163 {}] regionserver.HRegionServer(1224): stopping server 5c70517e8267,36163,1733592165482 2024-12-07T17:27:18,948 INFO [RS:0;5c70517e8267:41313 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-12-07T17:27:18,948 DEBUG [RS:2;5c70517e8267:36163 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:27:18,948 DEBUG [RS:0;5c70517e8267:41313 {}] regionserver.HRegionServer(1603): Online Regions={0138678f1a360d7573c5cfe5f7d0a7e0=testExportExpiredSnapshot,1,1733592310717.0138678f1a360d7573c5cfe5f7d0a7e0.} 2024-12-07T17:27:18,948 INFO [RS:2;5c70517e8267:36163 {}] regionserver.HRegionServer(1599): Waiting on 3 regions to close 2024-12-07T17:27:18,948 DEBUG [RS:2;5c70517e8267:36163 {}] regionserver.HRegionServer(1603): Online Regions={27a2410ab2d9b432daa26ec0ea504b68=hbase:acl,,1733592168970.27a2410ab2d9b432daa26ec0ea504b68., 6860b2d9133d857f1acf6c9fb2cd82f1=hbase:namespace,,1733592168179.6860b2d9133d857f1acf6c9fb2cd82f1., 
95a53dd0f0fa93ab4e6bebd2e0c2e9c5=testExportExpiredSnapshot,,1733592310717.95a53dd0f0fa93ab4e6bebd2e0c2e9c5.} 2024-12-07T17:27:18,948 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-07T17:27:18,948 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 27a2410ab2d9b432daa26ec0ea504b68, disabling compactions & flushes 2024-12-07T17:27:18,948 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:acl,,1733592168970.27a2410ab2d9b432daa26ec0ea504b68. 2024-12-07T17:27:18,948 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:acl,,1733592168970.27a2410ab2d9b432daa26ec0ea504b68. 2024-12-07T17:27:18,948 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:acl,,1733592168970.27a2410ab2d9b432daa26ec0ea504b68. after waiting 0 ms 2024-12-07T17:27:18,948 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:acl,,1733592168970.27a2410ab2d9b432daa26ec0ea504b68. 2024-12-07T17:27:18,948 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 0138678f1a360d7573c5cfe5f7d0a7e0, disabling compactions & flushes 2024-12-07T17:27:18,948 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,1,1733592310717.0138678f1a360d7573c5cfe5f7d0a7e0. 2024-12-07T17:27:18,948 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,1,1733592310717.0138678f1a360d7573c5cfe5f7d0a7e0. 2024-12-07T17:27:18,948 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,1,1733592310717.0138678f1a360d7573c5cfe5f7d0a7e0. after waiting 0 ms 2024-12-07T17:27:18,948 INFO [RS:1;5c70517e8267:38423 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-12-07T17:27:18,949 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,1,1733592310717.0138678f1a360d7573c5cfe5f7d0a7e0. 
2024-12-07T17:27:18,949 DEBUG [RS:1;5c70517e8267:38423 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-07T17:27:18,949 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 27a2410ab2d9b432daa26ec0ea504b68 1/1 column families, dataSize=1.38 KB heapSize=3.33 KB 2024-12-07T17:27:18,949 DEBUG [RS_CLOSE_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-07T17:27:18,949 INFO [RS_CLOSE_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-07T17:27:18,949 DEBUG [RS_CLOSE_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-07T17:27:18,949 DEBUG [RS_CLOSE_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T17:27:18,949 DEBUG [RS_CLOSE_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T17:27:18,949 INFO [RS_CLOSE_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=68.66 KB heapSize=109 KB 2024-12-07T17:27:18,952 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T17:27:18,952 DEBUG [RS:2;5c70517e8267:36163 {}] regionserver.HRegionServer(1629): Waiting on 27a2410ab2d9b432daa26ec0ea504b68, 6860b2d9133d857f1acf6c9fb2cd82f1, 95a53dd0f0fa93ab4e6bebd2e0c2e9c5 2024-12-07T17:27:18,952 DEBUG [RS:0;5c70517e8267:41313 {}] regionserver.HRegionServer(1629): Waiting on 0138678f1a360d7573c5cfe5f7d0a7e0 2024-12-07T17:27:18,953 DEBUG [RS:1;5c70517e8267:38423 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-07T17:27:18,955 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportExpiredSnapshot/0138678f1a360d7573c5cfe5f7d0a7e0/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-07T17:27:18,956 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:27:18,956 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,1,1733592310717.0138678f1a360d7573c5cfe5f7d0a7e0. 2024-12-07T17:27:18,956 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 0138678f1a360d7573c5cfe5f7d0a7e0: 2024-12-07T17:27:18,956 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1733592310717.0138678f1a360d7573c5cfe5f7d0a7e0. 
2024-12-07T17:27:18,967 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/acl/27a2410ab2d9b432daa26ec0ea504b68/.tmp/l/0764005db35a4bb3af5de62d9e417b1c is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1733592308483/DeleteFamily/seqid=0 2024-12-07T17:27:18,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742379_1555 (size=5695) 2024-12-07T17:27:18,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742379_1555 (size=5695) 2024-12-07T17:27:18,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742379_1555 (size=5695) 2024-12-07T17:27:18,973 INFO [regionserver/5c70517e8267:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T17:27:18,973 INFO [regionserver/5c70517e8267:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T17:27:18,974 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.38 KB at sequenceid=27 (bloomFilter=false), to=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/acl/27a2410ab2d9b432daa26ec0ea504b68/.tmp/l/0764005db35a4bb3af5de62d9e417b1c 2024-12-07T17:27:18,978 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0764005db35a4bb3af5de62d9e417b1c 2024-12-07T17:27:18,979 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/acl/27a2410ab2d9b432daa26ec0ea504b68/.tmp/l/0764005db35a4bb3af5de62d9e417b1c as hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/acl/27a2410ab2d9b432daa26ec0ea504b68/l/0764005db35a4bb3af5de62d9e417b1c 2024-12-07T17:27:18,980 DEBUG [RS_CLOSE_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/meta/1588230740/.tmp/info/e26a4675eb7e4a748d10007bf27a9242 is 173, key is testExportExpiredSnapshot,1,1733592310717.0138678f1a360d7573c5cfe5f7d0a7e0./info:regioninfo/1733592311118/Put/seqid=0 2024-12-07T17:27:18,983 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0764005db35a4bb3af5de62d9e417b1c 2024-12-07T17:27:18,983 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/acl/27a2410ab2d9b432daa26ec0ea504b68/l/0764005db35a4bb3af5de62d9e417b1c, entries=12, sequenceid=27, filesize=5.6 K 2024-12-07T17:27:18,984 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~1.38 
KB/1412, heapSize ~3.31 KB/3392, currentSize=0 B/0 for 27a2410ab2d9b432daa26ec0ea504b68 in 36ms, sequenceid=27, compaction requested=false 2024-12-07T17:27:18,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742380_1556 (size=15630) 2024-12-07T17:27:18,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742380_1556 (size=15630) 2024-12-07T17:27:18,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742380_1556 (size=15630) 2024-12-07T17:27:18,990 INFO [RS_CLOSE_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.26 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/meta/1588230740/.tmp/info/e26a4675eb7e4a748d10007bf27a9242 2024-12-07T17:27:18,991 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/acl/27a2410ab2d9b432daa26ec0ea504b68/recovered.edits/30.seqid, newMaxSeqId=30, maxSeqId=1 2024-12-07T17:27:18,992 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:27:18,992 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:acl,,1733592168970.27a2410ab2d9b432daa26ec0ea504b68. 2024-12-07T17:27:18,992 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 27a2410ab2d9b432daa26ec0ea504b68: 2024-12-07T17:27:18,992 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1733592168970.27a2410ab2d9b432daa26ec0ea504b68. 2024-12-07T17:27:18,993 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 6860b2d9133d857f1acf6c9fb2cd82f1, disabling compactions & flushes 2024-12-07T17:27:18,993 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733592168179.6860b2d9133d857f1acf6c9fb2cd82f1. 2024-12-07T17:27:18,993 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733592168179.6860b2d9133d857f1acf6c9fb2cd82f1. 2024-12-07T17:27:18,993 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733592168179.6860b2d9133d857f1acf6c9fb2cd82f1. after waiting 0 ms 2024-12-07T17:27:18,993 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733592168179.6860b2d9133d857f1acf6c9fb2cd82f1. 
2024-12-07T17:27:18,993 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 6860b2d9133d857f1acf6c9fb2cd82f1 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-07T17:27:19,009 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/namespace/6860b2d9133d857f1acf6c9fb2cd82f1/.tmp/info/ce792b1e9bb44093bf6a5ab34541484e is 45, key is default/info:d/1733592168843/Put/seqid=0 2024-12-07T17:27:19,009 DEBUG [RS_CLOSE_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/meta/1588230740/.tmp/rep_barrier/383c6788b4d14af3b0d24d7228ccbaa2 is 133, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288905.4c981ac2f2269323a666f3595ec4f10b./rep_barrier:/1733592308511/DeleteFamily/seqid=0 2024-12-07T17:27:19,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742381_1557 (size=8007) 2024-12-07T17:27:19,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742382_1558 (size=5037) 2024-12-07T17:27:19,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742381_1557 (size=8007) 2024-12-07T17:27:19,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742382_1558 (size=5037) 2024-12-07T17:27:19,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742382_1558 (size=5037) 2024-12-07T17:27:19,017 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/namespace/6860b2d9133d857f1acf6c9fb2cd82f1/.tmp/info/ce792b1e9bb44093bf6a5ab34541484e 2024-12-07T17:27:19,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742381_1557 (size=8007) 2024-12-07T17:27:19,017 INFO [RS_CLOSE_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.34 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/meta/1588230740/.tmp/rep_barrier/383c6788b4d14af3b0d24d7228ccbaa2 2024-12-07T17:27:19,018 INFO [regionserver/5c70517e8267:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T17:27:19,022 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/namespace/6860b2d9133d857f1acf6c9fb2cd82f1/.tmp/info/ce792b1e9bb44093bf6a5ab34541484e as 
hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/namespace/6860b2d9133d857f1acf6c9fb2cd82f1/info/ce792b1e9bb44093bf6a5ab34541484e 2024-12-07T17:27:19,026 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/namespace/6860b2d9133d857f1acf6c9fb2cd82f1/info/ce792b1e9bb44093bf6a5ab34541484e, entries=2, sequenceid=6, filesize=4.9 K 2024-12-07T17:27:19,027 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 6860b2d9133d857f1acf6c9fb2cd82f1 in 34ms, sequenceid=6, compaction requested=false 2024-12-07T17:27:19,030 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/namespace/6860b2d9133d857f1acf6c9fb2cd82f1/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T17:27:19,031 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:27:19,031 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733592168179.6860b2d9133d857f1acf6c9fb2cd82f1. 2024-12-07T17:27:19,031 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 6860b2d9133d857f1acf6c9fb2cd82f1: 2024-12-07T17:27:19,031 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733592168179.6860b2d9133d857f1acf6c9fb2cd82f1. 2024-12-07T17:27:19,031 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 95a53dd0f0fa93ab4e6bebd2e0c2e9c5, disabling compactions & flushes 2024-12-07T17:27:19,031 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,,1733592310717.95a53dd0f0fa93ab4e6bebd2e0c2e9c5. 2024-12-07T17:27:19,031 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,,1733592310717.95a53dd0f0fa93ab4e6bebd2e0c2e9c5. 2024-12-07T17:27:19,031 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,,1733592310717.95a53dd0f0fa93ab4e6bebd2e0c2e9c5. after waiting 0 ms 2024-12-07T17:27:19,031 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,,1733592310717.95a53dd0f0fa93ab4e6bebd2e0c2e9c5. 
2024-12-07T17:27:19,035 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/default/testExportExpiredSnapshot/95a53dd0f0fa93ab4e6bebd2e0c2e9c5/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-07T17:27:19,035 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:27:19,035 INFO [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,,1733592310717.95a53dd0f0fa93ab4e6bebd2e0c2e9c5. 2024-12-07T17:27:19,035 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 95a53dd0f0fa93ab4e6bebd2e0c2e9c5: 2024-12-07T17:27:19,035 DEBUG [RS_CLOSE_REGION-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1733592310717.95a53dd0f0fa93ab4e6bebd2e0c2e9c5. 2024-12-07T17:27:19,038 DEBUG [RS_CLOSE_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/meta/1588230740/.tmp/table/d138cdc457194ccaa005b87a5d9a773b is 127, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733592288905.4c981ac2f2269323a666f3595ec4f10b./table:/1733592308511/DeleteFamily/seqid=0 2024-12-07T17:27:19,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073742383_1559 (size=8861) 2024-12-07T17:27:19,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073742383_1559 (size=8861) 2024-12-07T17:27:19,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073742383_1559 (size=8861) 2024-12-07T17:27:19,045 INFO [RS_CLOSE_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.06 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/meta/1588230740/.tmp/table/d138cdc457194ccaa005b87a5d9a773b 2024-12-07T17:27:19,050 DEBUG [RS_CLOSE_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/meta/1588230740/.tmp/info/e26a4675eb7e4a748d10007bf27a9242 as hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/meta/1588230740/info/e26a4675eb7e4a748d10007bf27a9242 2024-12-07T17:27:19,053 INFO [RS_CLOSE_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/meta/1588230740/info/e26a4675eb7e4a748d10007bf27a9242, entries=84, sequenceid=202, filesize=15.3 K 2024-12-07T17:27:19,054 DEBUG [RS_CLOSE_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/meta/1588230740/.tmp/rep_barrier/383c6788b4d14af3b0d24d7228ccbaa2 as hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/meta/1588230740/rep_barrier/383c6788b4d14af3b0d24d7228ccbaa2 2024-12-07T17:27:19,058 INFO [RS_CLOSE_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/meta/1588230740/rep_barrier/383c6788b4d14af3b0d24d7228ccbaa2, entries=21, sequenceid=202, filesize=7.8 K 2024-12-07T17:27:19,058 DEBUG [RS_CLOSE_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/meta/1588230740/.tmp/table/d138cdc457194ccaa005b87a5d9a773b as hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/meta/1588230740/table/d138cdc457194ccaa005b87a5d9a773b 2024-12-07T17:27:19,062 INFO [RS_CLOSE_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/meta/1588230740/table/d138cdc457194ccaa005b87a5d9a773b, entries=38, sequenceid=202, filesize=8.7 K 2024-12-07T17:27:19,062 INFO [RS_CLOSE_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~68.66 KB/70312, heapSize ~108.95 KB/111568, currentSize=0 B/0 for 1588230740 in 113ms, sequenceid=202, compaction requested=false 2024-12-07T17:27:19,066 DEBUG [RS_CLOSE_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/data/hbase/meta/1588230740/recovered.edits/205.seqid, newMaxSeqId=205, maxSeqId=1 2024-12-07T17:27:19,066 DEBUG [RS_CLOSE_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:27:19,066 DEBUG [RS_CLOSE_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T17:27:19,066 INFO [RS_CLOSE_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-07T17:27:19,066 DEBUG [RS_CLOSE_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-07T17:27:19,067 DEBUG [RS_CLOSE_META-regionserver/5c70517e8267:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-07T17:27:19,153 INFO [RS:1;5c70517e8267:38423 {}] regionserver.HRegionServer(1250): stopping server 5c70517e8267,38423,1733592165361; all regions closed. 2024-12-07T17:27:19,153 INFO [RS:0;5c70517e8267:41313 {}] regionserver.HRegionServer(1250): stopping server 5c70517e8267,41313,1733592165267; all regions closed. 2024-12-07T17:27:19,153 INFO [RS:2;5c70517e8267:36163 {}] regionserver.HRegionServer(1250): stopping server 5c70517e8267,36163,1733592165482; all regions closed. 
2024-12-07T17:27:19,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741836_1012 (size=80694) 2024-12-07T17:27:19,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741833_1009 (size=17296) 2024-12-07T17:27:19,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741836_1012 (size=80694) 2024-12-07T17:27:19,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741833_1009 (size=17296) 2024-12-07T17:27:19,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741834_1010 (size=11754) 2024-12-07T17:27:19,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741834_1010 (size=11754) 2024-12-07T17:27:19,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741836_1012 (size=80694) 2024-12-07T17:27:19,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741833_1009 (size=17296) 2024-12-07T17:27:19,161 DEBUG [RS:1;5c70517e8267:38423 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/oldWALs 2024-12-07T17:27:19,161 INFO [RS:1;5c70517e8267:38423 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 5c70517e8267%2C38423%2C1733592165361.meta:.meta(num 1733592167936) 2024-12-07T17:27:19,161 DEBUG [RS:0;5c70517e8267:41313 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/oldWALs 2024-12-07T17:27:19,161 INFO [RS:0;5c70517e8267:41313 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 5c70517e8267%2C41313%2C1733592165267:(num 1733592167528) 2024-12-07T17:27:19,161 DEBUG [RS:0;5c70517e8267:41313 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:27:19,161 DEBUG [RS:2;5c70517e8267:36163 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/oldWALs 2024-12-07T17:27:19,161 INFO [RS:2;5c70517e8267:36163 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 5c70517e8267%2C36163%2C1733592165482:(num 1733592167531) 2024-12-07T17:27:19,161 INFO [RS:0;5c70517e8267:41313 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T17:27:19,161 DEBUG [RS:2;5c70517e8267:36163 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:27:19,161 INFO [RS:2;5c70517e8267:36163 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T17:27:19,162 INFO [RS:2;5c70517e8267:36163 {}] hbase.ChoreService(370): Chore service for: regionserver/5c70517e8267:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-07T17:27:19,162 INFO [RS:2;5c70517e8267:36163 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T17:27:19,162 INFO [RS:2;5c70517e8267:36163 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-12-07T17:27:19,162 INFO [RS:2;5c70517e8267:36163 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T17:27:19,162 INFO [regionserver/5c70517e8267:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-07T17:27:19,163 INFO [RS:2;5c70517e8267:36163 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:36163 2024-12-07T17:27:19,162 INFO [RS:0;5c70517e8267:41313 {}] hbase.ChoreService(370): Chore service for: regionserver/5c70517e8267:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-07T17:27:19,163 INFO [RS:0;5c70517e8267:41313 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T17:27:19,163 INFO [RS:0;5c70517e8267:41313 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T17:27:19,163 INFO [RS:0;5c70517e8267:41313 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T17:27:19,163 INFO [regionserver/5c70517e8267:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-07T17:27:19,163 INFO [RS:0;5c70517e8267:41313 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:41313 2024-12-07T17:27:19,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33165 is added to blk_1073741835_1011 (size=9224) 2024-12-07T17:27:19,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33595 is added to blk_1073741835_1011 (size=9224) 2024-12-07T17:27:19,167 DEBUG [RS:1;5c70517e8267:38423 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/oldWALs 2024-12-07T17:27:19,167 INFO [RS:1;5c70517e8267:38423 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 5c70517e8267%2C38423%2C1733592165361:(num 1733592167529) 2024-12-07T17:27:19,167 DEBUG [RS:1;5c70517e8267:38423 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:27:19,167 INFO [RS:1;5c70517e8267:38423 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T17:27:19,167 INFO [RS:1;5c70517e8267:38423 {}] hbase.ChoreService(370): Chore service for: regionserver/5c70517e8267:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-07T17:27:19,168 INFO [regionserver/5c70517e8267:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-07T17:27:19,168 INFO [RS:1;5c70517e8267:38423 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:38423 2024-12-07T17:27:19,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5c70517e8267,41313,1733592165267 2024-12-07T17:27:19,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T17:27:19,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5c70517e8267,36163,1733592165482 2024-12-07T17:27:19,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5c70517e8267,38423,1733592165361 2024-12-07T17:27:19,171 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5c70517e8267,36163,1733592165482] 2024-12-07T17:27:19,171 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 5c70517e8267,36163,1733592165482; numProcessing=1 2024-12-07T17:27:19,173 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/5c70517e8267,36163,1733592165482 already deleted, retry=false 2024-12-07T17:27:19,173 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 5c70517e8267,36163,1733592165482 expired; onlineServers=2 2024-12-07T17:27:19,173 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5c70517e8267,38423,1733592165361] 2024-12-07T17:27:19,173 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 5c70517e8267,38423,1733592165361; numProcessing=2 2024-12-07T17:27:19,174 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/5c70517e8267,38423,1733592165361 already deleted, retry=false 2024-12-07T17:27:19,174 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 5c70517e8267,38423,1733592165361 expired; onlineServers=1 2024-12-07T17:27:19,174 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5c70517e8267,41313,1733592165267] 2024-12-07T17:27:19,174 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 5c70517e8267,41313,1733592165267; numProcessing=3 2024-12-07T17:27:19,175 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/5c70517e8267,41313,1733592165267 already deleted, retry=false 2024-12-07T17:27:19,175 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 5c70517e8267,41313,1733592165267 expired; onlineServers=0 2024-12-07T17:27:19,175 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '5c70517e8267,36895,1733592164285' ***** 2024-12-07T17:27:19,175 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-07T17:27:19,175 DEBUG 
[M:0;5c70517e8267:36895 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@86ac7d6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5c70517e8267/172.17.0.2:0 2024-12-07T17:27:19,175 INFO [M:0;5c70517e8267:36895 {}] regionserver.HRegionServer(1224): stopping server 5c70517e8267,36895,1733592164285 2024-12-07T17:27:19,175 INFO [M:0;5c70517e8267:36895 {}] regionserver.HRegionServer(1250): stopping server 5c70517e8267,36895,1733592164285; all regions closed. 2024-12-07T17:27:19,175 DEBUG [M:0;5c70517e8267:36895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T17:27:19,175 DEBUG [M:0;5c70517e8267:36895 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-07T17:27:19,175 DEBUG [M:0;5c70517e8267:36895 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-07T17:27:19,175 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-07T17:27:19,175 DEBUG [master/5c70517e8267:0:becomeActiveMaster-HFileCleaner.large.0-1733592167138 {}] cleaner.HFileCleaner(306): Exit Thread[master/5c70517e8267:0:becomeActiveMaster-HFileCleaner.large.0-1733592167138,5,FailOnTimeoutGroup] 2024-12-07T17:27:19,175 INFO [M:0;5c70517e8267:36895 {}] hbase.ChoreService(370): Chore service for: master/5c70517e8267:0 had [] on shutdown 2024-12-07T17:27:19,176 DEBUG [M:0;5c70517e8267:36895 {}] master.HMaster(1733): Stopping service threads 2024-12-07T17:27:19,176 INFO [M:0;5c70517e8267:36895 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-07T17:27:19,176 DEBUG [master/5c70517e8267:0:becomeActiveMaster-HFileCleaner.small.0-1733592167145 {}] cleaner.HFileCleaner(306): Exit Thread[master/5c70517e8267:0:becomeActiveMaster-HFileCleaner.small.0-1733592167145,5,FailOnTimeoutGroup] 2024-12-07T17:27:19,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-07T17:27:19,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T17:27:19,176 INFO [M:0;5c70517e8267:36895 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-07T17:27:19,177 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-07T17:27:19,177 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T17:27:19,177 DEBUG [M:0;5c70517e8267:36895 {}] zookeeper.ZKUtil(347): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-07T17:27:19,177 WARN [M:0;5c70517e8267:36895 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-07T17:27:19,177 INFO [M:0;5c70517e8267:36895 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-07T17:27:19,177 INFO [M:0;5c70517e8267:36895 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-07T17:27:19,178 DEBUG [M:0;5c70517e8267:36895 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T17:27:19,210 INFO [M:0;5c70517e8267:36895 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T17:27:19,210 DEBUG [M:0;5c70517e8267:36895 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T17:27:19,210 DEBUG [M:0;5c70517e8267:36895 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T17:27:19,211 DEBUG [M:0;5c70517e8267:36895 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T17:27:19,211 INFO [M:0;5c70517e8267:36895 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=805.83 KB heapSize=967.21 KB 2024-12-07T17:27:19,211 ERROR [AsyncFSWAL-0-hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/MasterData-prefix:5c70517e8267,36895,1733592164285 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/MasterData-prefix:5c70517e8267,36895,1733592164285,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:419) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:132) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:830) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:128) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1148) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.appendAndSync(AsyncFSWAL.java:500) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.consume(AsyncFSWAL.java:603) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T17:27:19,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T17:27:19,271 INFO [RS:0;5c70517e8267:41313 {}] regionserver.HRegionServer(1307): Exiting; stopping=5c70517e8267,41313,1733592165267; zookeeper connection closed. 2024-12-07T17:27:19,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41313-0x1006d80dcf90001, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T17:27:19,271 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@60658df8 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@60658df8 2024-12-07T17:27:19,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T17:27:19,272 INFO [RS:2;5c70517e8267:36163 {}] regionserver.HRegionServer(1307): Exiting; stopping=5c70517e8267,36163,1733592165482; zookeeper connection closed. 2024-12-07T17:27:19,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36163-0x1006d80dcf90003, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T17:27:19,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T17:27:19,272 INFO [RS:1;5c70517e8267:38423 {}] regionserver.HRegionServer(1307): Exiting; stopping=5c70517e8267,38423,1733592165361; zookeeper connection closed. 
2024-12-07T17:27:19,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38423-0x1006d80dcf90002, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T17:27:19,272 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@e369a37 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@e369a37 2024-12-07T17:27:19,272 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@de17727 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@de17727 2024-12-07T17:27:19,273 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-07T17:27:21,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741834_1010 (size=11754) 2024-12-07T17:27:21,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741835_1011 (size=9224) 2024-12-07T17:27:21,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45837 is added to blk_1073741830_1006 (size=947297) 2024-12-07T17:27:24,473 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T17:27:24,860 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:27:24,860 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-07T17:27:24,860 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-07T17:27:24,860 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-07T17:27:24,860 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:27:24,860 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-07T17:27:24,860 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T17:27:24,860 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T17:27:24,860 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-07T17:27:30,362 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot 
locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T17:27:43,251 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-07T17:27:45,603 DEBUG [master/5c70517e8267:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=22, reuseRatio=68.75% 2024-12-07T17:27:45,604 DEBUG [master/5c70517e8267:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-07T17:27:53,513 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T17:28:13,252 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;5c70517e8267:36895 228 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 15 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@1a8a2143 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 17 Waited count: 19 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d76ba68 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 3422 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 35 Waiting on java.util.concurrent.CountDownLatch$Sync@48909076 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12217 Waited count: 12819 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) 
java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 12 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@6f96b8c0 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@75bfaa0 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@263e077f): State: TIMED_WAITING Blocked count: 0 Waited count: 679 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 68 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp2988114-37): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp2988114-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp2988114-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp2988114-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp2988114-41-acceptor-0@2ab2bdbc-ServerConnector@2d3d9b09{HTTP/1.1, (http/1.1)}{localhost:44973}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp2988114-42): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp2988114-43): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp2988114-44): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-4898edba-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 23 Waited count: 3047 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41889df6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 33765): State: TIMED_WAITING Blocked count: 1 Waited count: 35 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 68 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@1f645805): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@40205119): State: TIMED_WAITING Blocked count: 0 Waited count: 68 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 33414 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 0 Waited count: 1503 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@488942f0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 33765): State: TIMED_WAITING Blocked count: 69 Waited count: 2097 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 33765): State: TIMED_WAITING Blocked count: 69 Waited count: 2094 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 33765): State: TIMED_WAITING Blocked count: 81 Waited count: 2110 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 33765): State: TIMED_WAITING Blocked count: 69 Waited count: 2094 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 33765): State: TIMED_WAITING Blocked count: 57 Waited count: 2097 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): 
State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@34326363): State: TIMED_WAITING Blocked count: 0 Waited count: 170 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@21d9c23a): State: TIMED_WAITING Blocked count: 0 Waited count: 68 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2b27bce4): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6f79ee98): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(2117026572)): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1029884707-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1029884707-88-acceptor-0@4bff0392-ServerConnector@69faf5ec{HTTP/1.1, (http/1.1)}{localhost:46687}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1029884707-89): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1029884707-90): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-397e9e71-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@59cc1837): State: TIMED_WAITING Blocked count: 0 Waited count: 675 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 34995): State: TIMED_WAITING Blocked count: 1 Waited count: 35 Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 68 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 260 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6e6dadab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-719710545-172.17.0.2-1733592159624 heartbeating to localhost/127.0.0.1:33765): State: TIMED_WAITING Blocked count: 1263 Waited count: 1375 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2bf238a5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 34995): State: TIMED_WAITING Blocked count: 0 Waited count: 345 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 34995): State: TIMED_WAITING Blocked count: 0 Waited count: 344 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 34995): State: TIMED_WAITING Blocked count: 0 Waited count: 345 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 34995): State: TIMED_WAITING Blocked count: 0 Waited count: 345 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 34995): State: TIMED_WAITING Blocked count: 0 Waited count: 342 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (IPC Client (1173471231) connection to localhost/127.0.0.1:33765 from jenkins): State: TIMED_WAITING Blocked count: 1260 Waited count: 1260 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 119 (IPC Parameter Sending Thread for localhost/127.0.0.1:33765): State: TIMED_WAITING Blocked count: 0 Waited count: 1856 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp2057355987-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp2057355987-122-acceptor-0@2ffac974-ServerConnector@7ad86901{HTTP/1.1, (http/1.1)}{localhost:33865}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp2057355987-123): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp2057355987-124): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-6bc79822-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@46df0e6a): State: TIMED_WAITING Blocked count: 0 Waited count: 674 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 131 (IPC Server idle connection scanner for port 46497): State: TIMED_WAITING Blocked count: 1 Waited count: 35 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) 
java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 133 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 68 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (Command processor): State: WAITING Blocked count: 0 Waited count: 285 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@74e4858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 137 (BP-719710545-172.17.0.2-1733592159624 heartbeating to localhost/127.0.0.1:33765): State: TIMED_WAITING Blocked count: 1273 Waited count: 1379 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@42778ec6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 132 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 129 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 140 (IPC Server handler 0 on default port 46497): State: TIMED_WAITING Blocked count: 0 Waited count: 338 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 1 on default port 46497): State: TIMED_WAITING Blocked count: 0 Waited count: 343 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 2 on default port 46497): State: TIMED_WAITING Blocked count: 0 Waited count: 349 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 144 (IPC Server handler 3 on default port 46497): State: TIMED_WAITING Blocked count: 0 Waited count: 338 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 145 (IPC Server handler 4 on default port 46497): State: TIMED_WAITING Blocked count: 0 Waited count: 338 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 157 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data1)): State: TIMED_WAITING Blocked count: 4 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 159 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data3)): State: TIMED_WAITING Blocked count: 14 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 160 (qtp337563427-160): State: RUNNABLE 
Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (qtp337563427-163-acceptor-0@16ac471f-ServerConnector@950ceea{HTTP/1.1, (http/1.1)}{localhost:33521}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data4)): State: TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 161 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data2)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (qtp337563427-164): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (qtp337563427-167): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 170 (Session-HouseKeeper-63a0eda1-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data2/current/BP-719710545-172.17.0.2-1733592159624): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data3/current/BP-719710545-172.17.0.2-1733592159624): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 178 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data4/current/BP-719710545-172.17.0.2-1733592159624): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data1/current/BP-719710545-172.17.0.2-1733592159624): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 188 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 189 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 (java.util.concurrent.ThreadPoolExecutor$Worker@113e5a5d[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 195 (java.util.concurrent.ThreadPoolExecutor$Worker@36c9a67f[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@41b5ad5e): State: TIMED_WAITING Blocked count: 0 Waited count: 673 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 200 (IPC Server idle connection scanner for port 44011): State: TIMED_WAITING Blocked count: 1 Waited count: 35 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 202 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 68 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (Command processor): State: WAITING Blocked count: 3 Waited count: 282 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3971fa04 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 206 (BP-719710545-172.17.0.2-1733592159624 heartbeating to localhost/127.0.0.1:33765): State: TIMED_WAITING Blocked count: 1236 Waited count: 1359 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ceeca3c): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 198 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 208 (IPC Server handler 0 on default port 44011): State: TIMED_WAITING Blocked count: 0 Waited count: 337 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 209 (IPC Server handler 1 on default port 44011): State: TIMED_WAITING Blocked count: 0 Waited count: 340 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 210 (IPC Server handler 2 on default port 44011): State: TIMED_WAITING Blocked count: 0 Waited count: 338 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 3 on default port 44011): State: TIMED_WAITING Blocked count: 
0 Waited count: 340 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 4 on default port 44011): State: TIMED_WAITING Blocked count: 0 Waited count: 347 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 215 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 216 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data6/current/BP-719710545-172.17.0.2-1733592159624): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data5/current/BP-719710545-172.17.0.2-1733592159624): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (java.util.concurrent.ThreadPoolExecutor$Worker@3fcc5d6[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 236 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 17 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:58133): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 235 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 34 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 239 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 169 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 240 (SyncThread:0): State: WAITING Blocked count: 21 Waited count: 732 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16227d79 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 241 (ProcessThread(sid:0 cport:58133):): State: WAITING Blocked count: 3 Waited count: 844 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70488d60 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 242 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 876 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1bcb33eb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 243 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 245 (LeaseRenewer:jenkins@localhost:33765): State: TIMED_WAITING Blocked count: 9 Waited count: 350 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4d63ad24 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 258 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 25 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test-SendThread(127.0.0.1:58133)): State: RUNNABLE Blocked count: 23 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 258 (Time-limited test-EventThread): State: WAITING Blocked count: 3 Waited count: 61 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3b622adb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 259 (NIOWorkerThread-2): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (NIOWorkerThread-3): State: WAITING Blocked count: 3 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-4): State: WAITING Blocked count: 4 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (zk-event-processor-pool-0): State: WAITING Blocked count: 35 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@d3c757a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-5): State: WAITING Blocked count: 0 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-6): State: WAITING Blocked count: 2 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-9): State: WAITING Blocked count: 1 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-10): State: WAITING Blocked count: 0 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-11): State: WAITING Blocked count: 1 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-12): State: WAITING Blocked count: 5 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-13): State: WAITING Blocked count: 3 Waited count: 131 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-14): State: WAITING Blocked count: 3 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-15): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 
(NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36895): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5d66f374 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895): State: WAITING Blocked count: 56 Waited count: 216 Waiting on java.util.concurrent.Semaphore$NonfairSync@4987d9f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895): State: WAITING Blocked count: 245 Waited count: 931 Waiting on java.util.concurrent.Semaphore$NonfairSync@2e86bb25 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 278 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36895): State: WAITING Blocked count: 108 Waited count: 5536 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@40ff8c2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36895): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a683a12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36895): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a683a12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=36895): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6ddbde45 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=36895): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@769f1f48 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=36895): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4a3d536d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=36895): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@216e4892 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 288 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 310 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 332 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 30 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 286 (M:0;5c70517e8267:36895): State: TIMED_WAITING Blocked count: 6 Waited count: 2737 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$962/0x00007fc828f1e760.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 355 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 34 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (master/5c70517e8267:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (master/5c70517e8267:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (org.apache.hadoop.hdfs.PeerCache@55edb415): State: TIMED_WAITING Blocked count: 0 Waited count: 112 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 378 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 3331 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 393 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 46 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 394 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 114 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 405 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 55 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 416 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited 
count: 34 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 404 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 33205 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 425 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 27 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 426 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 21 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 447 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@381e8fab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 471 (regionserver/5c70517e8267:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4d5e888d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 472 (regionserver/5c70517e8267:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@24557b77 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 473 (regionserver/5c70517e8267:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2bf2d535 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 499 (LeaseRenewer:jenkins.hfs.2@localhost:33765): State: TIMED_WAITING Blocked count: 9 Waited count: 346 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 500 (LeaseRenewer:jenkins.hfs.0@localhost:33765): State: TIMED_WAITING Blocked count: 9 Waited count: 347 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 502 (LeaseRenewer:jenkins.hfs.1@localhost:33765): State: TIMED_WAITING Blocked count: 9 Waited count: 347 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 507 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 518 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 520 (region-location-0): State: WAITING Blocked count: 9 Waited count: 14 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@675d7437 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 550 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 33009 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 559 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 573 (ForkJoinPool.commonPool-worker-1): State: WAITING Blocked count: 0 Waited count: 558 Waiting on java.util.concurrent.ForkJoinPool@5e7e82b1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 574 (ForkJoinPool.commonPool-worker-2): State: WAITING Blocked count: 0 Waited count: 644 Waiting on java.util.concurrent.ForkJoinPool@5e7e82b1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 575 (ForkJoinPool.commonPool-worker-3): State: TIMED_WAITING Blocked count: 0 Waited count: 392 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 596 (region-location-1): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@675d7437 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 597 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@675d7437 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 598 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@675d7437 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1008 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 406 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1072 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1100 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1110 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 60 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6c19f8ad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1169 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1170 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1525 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@6069d5d5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2045 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3075 (region-location-4): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@675d7437 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5073 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5074 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5075 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9051 (AsyncFSWAL-1-hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/MasterData-prefix:5c70517e8267,36895,1733592164285): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4aa8b863 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9054 (java.util.concurrent.ThreadPoolExecutor$Worker@375296de[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9056 (java.util.concurrent.ThreadPoolExecutor$Worker@13deb896[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9057 (java.util.concurrent.ThreadPoolExecutor$Worker@50fabc6c[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9062 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-07T17:28:43,252 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-07T17:29:13,252 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;5c70517e8267:36895 220 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 15 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@1a8a2143 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 17 Waited count: 20 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 22 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d76ba68 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 4022 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.CountDownLatch$Sync@672c4858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12217 Waited count: 12820 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 12 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@6f96b8c0 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@75bfaa0 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@263e077f): State: TIMED_WAITING Blocked count: 0 Waited count: 799 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 80 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp2988114-37): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp2988114-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp2988114-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp2988114-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp2988114-41-acceptor-0@2ab2bdbc-ServerConnector@2d3d9b09{HTTP/1.1, (http/1.1)}{localhost:44973}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp2988114-42): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp2988114-43): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp2988114-44): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-4898edba-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 23 Waited count: 3047 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41889df6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 33765): State: TIMED_WAITING Blocked count: 1 Waited 
count: 41 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@1f645805): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 133 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@40205119): State: TIMED_WAITING Blocked count: 0 Waited count: 80 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 133 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 39378 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 0 Waited count: 1503 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@488942f0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 33765): State: TIMED_WAITING Blocked count: 73 Waited count: 2158 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 33765): State: TIMED_WAITING Blocked count: 71 Waited count: 2154 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 33765): State: TIMED_WAITING Blocked count: 85 Waited count: 2171 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 33765): State: TIMED_WAITING Blocked count: 71 Waited count: 2154 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 33765): State: TIMED_WAITING Blocked count: 64 Waited count: 2157 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@34326363): State: TIMED_WAITING Blocked count: 0 Waited count: 200 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@21d9c23a): State: TIMED_WAITING Blocked count: 0 Waited count: 80 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2b27bce4): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6f79ee98): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(2117026572)): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1029884707-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1029884707-88-acceptor-0@4bff0392-ServerConnector@69faf5ec{HTTP/1.1, (http/1.1)}{localhost:46687}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1029884707-89): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1029884707-90): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-397e9e71-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@59cc1837): State: TIMED_WAITING Blocked count: 0 Waited count: 795 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 34995): State: TIMED_WAITING Blocked count: 1 Waited count: 41 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 280 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6e6dadab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-719710545-172.17.0.2-1733592159624 heartbeating to localhost/127.0.0.1:33765): State: TIMED_WAITING Blocked count: 1283 Waited count: 1422 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2bf238a5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 34995): State: TIMED_WAITING Blocked count: 0 Waited count: 407 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 34995): State: TIMED_WAITING Blocked count: 0 Waited count: 404 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 34995): State: TIMED_WAITING Blocked count: 0 Waited count: 407 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 34995): State: TIMED_WAITING Blocked count: 0 Waited count: 405 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 34995): State: TIMED_WAITING Blocked count: 0 Waited count: 402 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (IPC Client (1173471231) connection to localhost/127.0.0.1:33765 from jenkins): State: TIMED_WAITING Blocked count: 1303 Waited count: 1303 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 119 (IPC Parameter Sending Thread for localhost/127.0.0.1:33765): State: TIMED_WAITING Blocked count: 0 Waited count: 1899 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp2057355987-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 
(qtp2057355987-122-acceptor-0@2ffac974-ServerConnector@7ad86901{HTTP/1.1, (http/1.1)}{localhost:33865}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp2057355987-123): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp2057355987-124): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-6bc79822-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@46df0e6a): State: TIMED_WAITING Blocked count: 0 Waited count: 794 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 131 (IPC Server idle connection scanner for port 46497): State: TIMED_WAITING Blocked count: 1 Waited count: 41 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 133 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (Command processor): State: WAITING Blocked count: 0 Waited count: 305 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@74e4858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 137 (BP-719710545-172.17.0.2-1733592159624 heartbeating to localhost/127.0.0.1:33765): State: TIMED_WAITING Blocked count: 1293 Waited count: 1429 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@42778ec6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 132 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 129 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 140 (IPC Server handler 0 on default port 46497): State: TIMED_WAITING Blocked count: 0 Waited count: 398 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 1 on default port 46497): State: TIMED_WAITING Blocked count: 0 Waited count: 405 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 2 on default port 46497): State: TIMED_WAITING Blocked count: 0 Waited count: 412 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 144 (IPC Server handler 3 on default port 46497): State: TIMED_WAITING Blocked count: 0 Waited count: 398 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 145 (IPC Server handler 4 on default port 46497): State: TIMED_WAITING Blocked count: 0 Waited count: 398 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 157 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data1)): State: TIMED_WAITING Blocked count: 4 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 159 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data3)): State: TIMED_WAITING Blocked count: 14 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 160 (qtp337563427-160): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (qtp337563427-163-acceptor-0@16ac471f-ServerConnector@950ceea{HTTP/1.1, (http/1.1)}{localhost:33521}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data4)): State: TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 161 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data2)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (qtp337563427-164): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (qtp337563427-167): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 170 (Session-HouseKeeper-63a0eda1-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data2/current/BP-719710545-172.17.0.2-1733592159624): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data3/current/BP-719710545-172.17.0.2-1733592159624): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 178 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data4/current/BP-719710545-172.17.0.2-1733592159624): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data1/current/BP-719710545-172.17.0.2-1733592159624): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 188 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 189 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 (java.util.concurrent.ThreadPoolExecutor$Worker@113e5a5d[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 195 (java.util.concurrent.ThreadPoolExecutor$Worker@36c9a67f[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@41b5ad5e): State: TIMED_WAITING Blocked count: 0 Waited count: 793 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 200 (IPC Server idle connection scanner for port 44011): State: TIMED_WAITING Blocked count: 1 Waited count: 41 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 202 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (Command processor): State: WAITING Blocked count: 3 Waited count: 302 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3971fa04 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) 
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 206 (BP-719710545-172.17.0.2-1733592159624 heartbeating to localhost/127.0.0.1:33765): State: TIMED_WAITING Blocked count: 1256 Waited count: 1399 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ceeca3c): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 198 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 208 (IPC Server handler 0 on default port 44011): State: TIMED_WAITING Blocked count: 0 Waited count: 397 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 209 (IPC Server handler 1 on default port 44011): State: TIMED_WAITING Blocked count: 0 Waited count: 400 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 210 (IPC Server handler 2 on default port 44011): State: TIMED_WAITING Blocked count: 0 Waited count: 398 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 3 on default port 44011): State: TIMED_WAITING Blocked count: 0 Waited count: 401 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 4 on default port 44011): State: TIMED_WAITING Blocked count: 0 Waited count: 410 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 215 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 216 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data6/current/BP-719710545-172.17.0.2-1733592159624): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data5/current/BP-719710545-172.17.0.2-1733592159624): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (java.util.concurrent.ThreadPoolExecutor$Worker@3fcc5d6[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 
(FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 236 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 17 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:58133): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 235 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 40 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 239 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 199 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 240 (SyncThread:0): State: WAITING Blocked count: 21 Waited count: 737 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16227d79 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 241 (ProcessThread(sid:0 cport:58133):): State: WAITING Blocked count: 3 Waited count: 849 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70488d60 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 242 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 881 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1bcb33eb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 243 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4d63ad24 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 286 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 25 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test-SendThread(127.0.0.1:58133)): State: RUNNABLE Blocked count: 23 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 258 (Time-limited test-EventThread): State: WAITING Blocked count: 3 Waited count: 61 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3b622adb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 259 (NIOWorkerThread-2): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (NIOWorkerThread-3): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-4): State: WAITING Blocked count: 4 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (zk-event-processor-pool-0): State: WAITING Blocked count: 35 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@d3c757a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-5): State: WAITING Blocked count: 0 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-6): State: WAITING Blocked count: 2 Waited count: 133 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 
(NIOWorkerThread-9): State: WAITING Blocked count: 1 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-10): State: WAITING Blocked count: 0 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-11): State: WAITING Blocked count: 1 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-12): State: WAITING Blocked count: 5 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-13): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-14): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-15): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36895): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5d66f374 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 276 
(RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895): State: WAITING Blocked count: 56 Waited count: 216 Waiting on java.util.concurrent.Semaphore$NonfairSync@4987d9f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895): State: WAITING Blocked count: 245 Waited count: 931 Waiting on java.util.concurrent.Semaphore$NonfairSync@2e86bb25 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 278 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36895): State: WAITING Blocked count: 108 Waited count: 5536 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@40ff8c2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36895): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a683a12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36895): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a683a12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=36895): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6ddbde45 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=36895): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@769f1f48 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=36895): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4a3d536d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=36895): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@216e4892 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 288 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 310 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 332 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 30 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 286 (M:0;5c70517e8267:36895): State: TIMED_WAITING Blocked count: 6 Waited count: 2737 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$962/0x00007fc828f1e760.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 355 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 40 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (master/5c70517e8267:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (master/5c70517e8267:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (org.apache.hadoop.hdfs.PeerCache@55edb415): State: TIMED_WAITING Blocked count: 0 Waited count: 132 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 378 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 3930 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 393 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 46 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 394 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 114 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 405 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 68 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6fe0c059 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 416 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 40 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 404 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 39207 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 425 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 27 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 426 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 21 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 447 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@381e8fab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 471 (regionserver/5c70517e8267:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4d5e888d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 472 (regionserver/5c70517e8267:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@24557b77 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 473 (regionserver/5c70517e8267:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2bf2d535 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 507 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 518 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 520 (region-location-0): State: WAITING Blocked count: 9 Waited count: 14 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@675d7437 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 550 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 39011 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 559 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 573 (ForkJoinPool.commonPool-worker-1): State: WAITING Blocked count: 0 Waited count: 558 Waiting on java.util.concurrent.ForkJoinPool@5e7e82b1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 574 (ForkJoinPool.commonPool-worker-2): State: TIMED_WAITING Blocked count: 0 Waited count: 645 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 596 (region-location-1): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@675d7437 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 597 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@675d7437 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 598 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@675d7437 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1008 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 412 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1072 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1100 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1110 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 60 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6c19f8ad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1169 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1170 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1525 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@6069d5d5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2045 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3075 (region-location-4): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@675d7437 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5073 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5074 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5075 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9051 (AsyncFSWAL-1-hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/MasterData-prefix:5c70517e8267,36895,1733592164285): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4aa8b863 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9062 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-07T17:29:43,252 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-07T17:30:13,252 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;5c70517e8267:36895 219 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 15 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@1a8a2143 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 17 Waited count: 21 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 25 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 22 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d76ba68 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 4622 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 47 Waiting on java.util.concurrent.CountDownLatch$Sync@7187d5f9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12217 Waited count: 12821 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 12 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@6f96b8c0 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@75bfaa0 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@263e077f): State: TIMED_WAITING Blocked count: 0 Waited count: 919 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp2988114-37): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp2988114-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp2988114-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp2988114-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp2988114-41-acceptor-0@2ab2bdbc-ServerConnector@2d3d9b09{HTTP/1.1, (http/1.1)}{localhost:44973}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp2988114-42): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp2988114-43): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp2988114-44): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-4898edba-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 23 Waited count: 3047 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41889df6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 33765): State: TIMED_WAITING Blocked count: 1 Waited 
count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@1f645805): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 153 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@40205119): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 153 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 45343 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 0 Waited count: 1503 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@488942f0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 33765): State: TIMED_WAITING Blocked count: 73 Waited count: 2218 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 33765): State: TIMED_WAITING Blocked count: 71 Waited count: 2215 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 33765): State: TIMED_WAITING Blocked count: 85 Waited count: 2231 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 33765): State: TIMED_WAITING Blocked count: 71 Waited count: 2214 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 33765): State: TIMED_WAITING Blocked count: 64 Waited count: 2218 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@34326363): State: TIMED_WAITING Blocked count: 0 Waited count: 230 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@21d9c23a): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2b27bce4): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6f79ee98): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(2117026572)): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1029884707-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1029884707-88-acceptor-0@4bff0392-ServerConnector@69faf5ec{HTTP/1.1, (http/1.1)}{localhost:46687}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1029884707-89): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1029884707-90): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-397e9e71-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@59cc1837): State: TIMED_WAITING Blocked count: 0 Waited count: 915 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 34995): State: TIMED_WAITING Blocked count: 1 Waited count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 300 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6e6dadab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-719710545-172.17.0.2-1733592159624 heartbeating to localhost/127.0.0.1:33765): State: TIMED_WAITING Blocked count: 1303 Waited count: 1462 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2bf238a5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 34995): State: TIMED_WAITING Blocked count: 0 Waited count: 467 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 34995): State: TIMED_WAITING Blocked count: 0 Waited count: 464 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 34995): State: TIMED_WAITING Blocked count: 0 Waited count: 467 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 34995): State: TIMED_WAITING Blocked count: 0 Waited count: 465 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 34995): State: TIMED_WAITING Blocked count: 0 Waited count: 462 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (IPC Client (1173471231) connection to localhost/127.0.0.1:33765 from jenkins): State: TIMED_WAITING Blocked count: 1362 Waited count: 1362 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 119 (IPC Parameter Sending Thread for localhost/127.0.0.1:33765): State: TIMED_WAITING Blocked count: 0 Waited count: 1959 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp2057355987-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 
(qtp2057355987-122-acceptor-0@2ffac974-ServerConnector@7ad86901{HTTP/1.1, (http/1.1)}{localhost:33865}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp2057355987-123): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp2057355987-124): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-6bc79822-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@46df0e6a): State: TIMED_WAITING Blocked count: 0 Waited count: 914 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 131 (IPC Server idle connection scanner for port 46497): State: TIMED_WAITING Blocked count: 1 Waited count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 133 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (Command processor): State: WAITING Blocked count: 0 Waited count: 325 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@74e4858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 137 (BP-719710545-172.17.0.2-1733592159624 heartbeating to localhost/127.0.0.1:33765): State: TIMED_WAITING Blocked count: 1313 Waited count: 1469 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@42778ec6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 132 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 129 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 140 (IPC Server handler 0 on default port 46497): State: TIMED_WAITING Blocked count: 0 Waited count: 458 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 1 on default port 46497): State: TIMED_WAITING Blocked count: 0 Waited count: 465 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 2 on default port 46497): State: TIMED_WAITING Blocked count: 0 Waited count: 472 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 144 (IPC Server handler 3 on default port 46497): State: TIMED_WAITING Blocked count: 0 Waited count: 458 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 145 (IPC Server handler 4 on default port 46497): State: TIMED_WAITING Blocked count: 0 Waited count: 458 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 157 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data1)): State: TIMED_WAITING Blocked count: 4 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 159 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data3)): State: TIMED_WAITING Blocked count: 14 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 160 (qtp337563427-160): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (qtp337563427-163-acceptor-0@16ac471f-ServerConnector@950ceea{HTTP/1.1, (http/1.1)}{localhost:33521}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data4)): State: TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 161 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data2)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (qtp337563427-164): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (qtp337563427-167): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 170 (Session-HouseKeeper-63a0eda1-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data2/current/BP-719710545-172.17.0.2-1733592159624): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data3/current/BP-719710545-172.17.0.2-1733592159624): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 178 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data4/current/BP-719710545-172.17.0.2-1733592159624): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data1/current/BP-719710545-172.17.0.2-1733592159624): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 188 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 189 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 (java.util.concurrent.ThreadPoolExecutor$Worker@113e5a5d[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 195 (java.util.concurrent.ThreadPoolExecutor$Worker@36c9a67f[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@41b5ad5e): State: TIMED_WAITING Blocked count: 0 Waited count: 913 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 200 (IPC Server idle connection scanner for port 44011): State: TIMED_WAITING Blocked count: 1 Waited count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 202 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (Command processor): State: WAITING Blocked count: 3 Waited count: 322 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3971fa04 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) 
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 206 (BP-719710545-172.17.0.2-1733592159624 heartbeating to localhost/127.0.0.1:33765): State: TIMED_WAITING Blocked count: 1276 Waited count: 1439 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ceeca3c): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 198 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 208 (IPC Server handler 0 on default port 44011): State: TIMED_WAITING Blocked count: 0 Waited count: 457 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 209 (IPC Server handler 1 on default port 44011): State: TIMED_WAITING Blocked count: 0 Waited count: 460 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 210 (IPC Server handler 2 on default port 44011): State: TIMED_WAITING Blocked count: 0 Waited count: 458 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 3 on default port 44011): State: TIMED_WAITING Blocked count: 0 Waited count: 461 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 4 on default port 44011): State: TIMED_WAITING Blocked count: 0 Waited count: 470 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 215 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 216 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data6/current/BP-719710545-172.17.0.2-1733592159624): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data5/current/BP-719710545-172.17.0.2-1733592159624): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (java.util.concurrent.ThreadPoolExecutor$Worker@3fcc5d6[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 
(FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 236 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 17 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:58133): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 235 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 46 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 239 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 229 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 240 (SyncThread:0): State: WAITING Blocked count: 21 Waited count: 741 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16227d79 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 241 (ProcessThread(sid:0 cport:58133):): State: WAITING Blocked count: 3 Waited count: 853 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70488d60 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 242 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 885 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1bcb33eb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 243 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4d63ad24 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 314 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 25 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test-SendThread(127.0.0.1:58133)): State: RUNNABLE Blocked count: 23 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 258 (Time-limited test-EventThread): State: WAITING Blocked count: 3 Waited count: 61 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3b622adb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 259 (NIOWorkerThread-2): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (NIOWorkerThread-3): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-4): State: WAITING Blocked count: 4 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (zk-event-processor-pool-0): State: WAITING Blocked count: 35 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@d3c757a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-5): State: WAITING Blocked count: 0 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-6): State: WAITING Blocked count: 2 Waited count: 134 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 
(NIOWorkerThread-9): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-10): State: WAITING Blocked count: 0 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-11): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-12): State: WAITING Blocked count: 5 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-13): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-14): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-15): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36895): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5d66f374 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 276 
(RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895): State: WAITING Blocked count: 56 Waited count: 216 Waiting on java.util.concurrent.Semaphore$NonfairSync@4987d9f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895): State: WAITING Blocked count: 245 Waited count: 931 Waiting on java.util.concurrent.Semaphore$NonfairSync@2e86bb25 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 278 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36895): State: WAITING Blocked count: 108 Waited count: 5536 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@40ff8c2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36895): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a683a12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36895): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a683a12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=36895): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6ddbde45 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=36895): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@769f1f48 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=36895): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4a3d536d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=36895): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@216e4892 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 288 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 310 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 332 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 30 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 286 (M:0;5c70517e8267:36895): State: TIMED_WAITING Blocked count: 6 Waited count: 2737 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$962/0x00007fc828f1e760.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 355 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 46 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (master/5c70517e8267:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (master/5c70517e8267:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (org.apache.hadoop.hdfs.PeerCache@55edb415): State: TIMED_WAITING Blocked count: 0 Waited count: 152 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 378 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 4530 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 393 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 46 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 394 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 114 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 405 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 68 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6fe0c059 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 416 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 46 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 404 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 45209 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 425 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 27 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 426 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 21 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 447 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@381e8fab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 471 (regionserver/5c70517e8267:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4d5e888d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 472 (regionserver/5c70517e8267:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@24557b77 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 473 (regionserver/5c70517e8267:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2bf2d535 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 507 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 518 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 520 (region-location-0): State: WAITING Blocked count: 9 Waited count: 14 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@675d7437 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 550 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 45012 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 559 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 573 (ForkJoinPool.commonPool-worker-1): State: TIMED_WAITING Blocked count: 0 Waited count: 559 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 596 (region-location-1): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@675d7437 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 597 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@675d7437 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 
598 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@675d7437 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1008 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 418 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1072 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1100 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 
Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1110 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 60 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6c19f8ad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1169 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1170 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1525 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@6069d5d5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2045 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3075 (region-location-4): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@675d7437 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5073 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5074 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5075 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9051 
(AsyncFSWAL-1-hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/MasterData-prefix:5c70517e8267,36895,1733592164285): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4aa8b863 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9062 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-07T17:30:43,253 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-07T17:31:13,253 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
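These recurring dumps are produced from inside the test JVM itself: the "Time-limited test" thread (Thread 22 in the dump that follows) is parked in org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo, called via Threads.threadDumpingIsAlive while LocalHBaseCluster.join waits for master 5c70517e8267:36895 to shut down (TestExportSnapshot.tearDownAfterClass -> shutdownMiniCluster), and the per-thread data comes from the JVM's standard ThreadMXBean (sun.management.ThreadImpl.getThreadInfo). As an illustrative sketch only, not HBase's actual utility, a dump with the same per-thread fields (State, Blocked count, Waited count, Stack) can be produced with plain java.lang.management calls:

  import java.lang.management.ManagementFactory;
  import java.lang.management.ThreadInfo;
  import java.lang.management.ThreadMXBean;

  // Illustrative sketch: prints one entry per live thread in the same shape as the
  // dumps in this log (State, Blocked count, Waited count, Stack). Formatting and
  // thread grouping differ from HBase's ReflectionUtils.printThreadInfo.
  public class ThreadDumpSketch {
    public static void dump() {
      ThreadMXBean mx = ManagementFactory.getThreadMXBean();
      // Full stack depth for every live thread id.
      ThreadInfo[] infos = mx.getThreadInfo(mx.getAllThreadIds(), Integer.MAX_VALUE);
      for (ThreadInfo info : infos) {
        if (info == null) {
          continue; // thread exited between getAllThreadIds() and getThreadInfo()
        }
        System.out.printf("Thread %d (%s):%n", info.getThreadId(), info.getThreadName());
        System.out.printf("  State: %s%n", info.getThreadState());
        System.out.printf("  Blocked count: %d%n", info.getBlockedCount());
        System.out.printf("  Waited count: %d%n", info.getWaitedCount());
        System.out.println("  Stack:");
        for (StackTraceElement frame : info.getStackTrace()) {
          System.out.println("    " + frame);
        }
      }
    }

    public static void main(String[] args) {
      dump();
    }
  }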
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;5c70517e8267:36895 218 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 15 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@1a8a2143 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 17 Waited count: 22 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 28 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d76ba68 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5222 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 53 Waiting on java.util.concurrent.CountDownLatch$Sync@3e8e516b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12217 Waited count: 12822 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 12 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@6f96b8c0 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@75bfaa0 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@263e077f): State: TIMED_WAITING Blocked count: 0 Waited count: 1039 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp2988114-37): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp2988114-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp2988114-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp2988114-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp2988114-41-acceptor-0@2ab2bdbc-ServerConnector@2d3d9b09{HTTP/1.1, (http/1.1)}{localhost:44973}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp2988114-42): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp2988114-43): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp2988114-44): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-4898edba-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 23 Waited count: 3047 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41889df6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 33765): State: TIMED_WAITING Blocked count: 1 Waited 
count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@1f645805): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 173 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@40205119): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 173 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 51307 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 0 Waited count: 1503 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@488942f0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 33765): State: TIMED_WAITING Blocked count: 73 Waited count: 2280 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 33765): State: TIMED_WAITING Blocked count: 71 Waited count: 2276 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 33765): State: TIMED_WAITING Blocked count: 85 Waited count: 2292 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 33765): State: TIMED_WAITING Blocked count: 71 Waited count: 2276 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 33765): State: TIMED_WAITING Blocked count: 64 Waited count: 2279 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@34326363): State: TIMED_WAITING Blocked count: 0 Waited count: 260 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@21d9c23a): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2b27bce4): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6f79ee98): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(2117026572)): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1029884707-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1029884707-88-acceptor-0@4bff0392-ServerConnector@69faf5ec{HTTP/1.1, (http/1.1)}{localhost:46687}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1029884707-89): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1029884707-90): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-397e9e71-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@59cc1837): State: TIMED_WAITING Blocked count: 0 Waited count: 1035 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 34995): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 320 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6e6dadab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-719710545-172.17.0.2-1733592159624 heartbeating to localhost/127.0.0.1:33765): State: TIMED_WAITING Blocked count: 1323 Waited count: 1502 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2bf238a5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 34995): State: TIMED_WAITING Blocked count: 0 Waited count: 527 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 34995): State: TIMED_WAITING Blocked count: 0 Waited count: 524 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 34995): State: TIMED_WAITING Blocked count: 0 Waited count: 527 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 34995): State: TIMED_WAITING Blocked count: 0 Waited count: 525 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 34995): State: TIMED_WAITING Blocked count: 0 Waited count: 522 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (IPC Client (1173471231) connection to localhost/127.0.0.1:33765 from jenkins): State: TIMED_WAITING Blocked count: 1422 Waited count: 1422 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 119 (IPC Parameter Sending Thread for localhost/127.0.0.1:33765): State: TIMED_WAITING Blocked count: 0 Waited count: 2019 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp2057355987-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 
(qtp2057355987-122-acceptor-0@2ffac974-ServerConnector@7ad86901{HTTP/1.1, (http/1.1)}{localhost:33865}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp2057355987-123): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp2057355987-124): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-6bc79822-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@46df0e6a): State: TIMED_WAITING Blocked count: 0 Waited count: 1034 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 131 (IPC Server idle connection scanner for port 46497): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 133 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (Command processor): State: WAITING Blocked count: 0 Waited count: 345 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@74e4858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 137 (BP-719710545-172.17.0.2-1733592159624 heartbeating to localhost/127.0.0.1:33765): State: TIMED_WAITING Blocked count: 1333 Waited count: 1509 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@42778ec6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 132 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 129 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 140 (IPC Server handler 0 on default port 46497): State: TIMED_WAITING Blocked count: 0 Waited count: 518 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 1 on default port 46497): State: TIMED_WAITING Blocked count: 0 Waited count: 525 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 2 on default port 46497): State: TIMED_WAITING Blocked count: 0 Waited count: 532 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 144 (IPC Server handler 3 on default port 46497): State: TIMED_WAITING Blocked count: 0 Waited count: 518 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 145 (IPC Server handler 4 on default port 46497): State: TIMED_WAITING Blocked count: 0 Waited count: 518 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 157 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data1)): State: TIMED_WAITING Blocked count: 4 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 159 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data3)): State: TIMED_WAITING Blocked count: 14 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 160 (qtp337563427-160): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (qtp337563427-163-acceptor-0@16ac471f-ServerConnector@950ceea{HTTP/1.1, (http/1.1)}{localhost:33521}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data4)): State: TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 161 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data2)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (qtp337563427-164): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (qtp337563427-167): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 170 (Session-HouseKeeper-63a0eda1-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data2/current/BP-719710545-172.17.0.2-1733592159624): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data3/current/BP-719710545-172.17.0.2-1733592159624): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 178 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data4/current/BP-719710545-172.17.0.2-1733592159624): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data1/current/BP-719710545-172.17.0.2-1733592159624): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 188 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 189 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 (java.util.concurrent.ThreadPoolExecutor$Worker@113e5a5d[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 195 (java.util.concurrent.ThreadPoolExecutor$Worker@36c9a67f[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@41b5ad5e): State: TIMED_WAITING Blocked count: 0 Waited count: 1033 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 200 (IPC Server idle connection scanner for port 44011): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 202 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (Command processor): State: WAITING Blocked count: 3 Waited count: 342 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3971fa04 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) 
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 206 (BP-719710545-172.17.0.2-1733592159624 heartbeating to localhost/127.0.0.1:33765): State: TIMED_WAITING Blocked count: 1296 Waited count: 1479 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ceeca3c): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 198 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 208 (IPC Server handler 0 on default port 44011): State: TIMED_WAITING Blocked count: 0 Waited count: 517 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 209 (IPC Server handler 1 on default port 44011): State: TIMED_WAITING Blocked count: 0 Waited count: 520 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 210 (IPC Server handler 2 on default port 44011): State: TIMED_WAITING Blocked count: 0 Waited count: 518 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 3 on default port 44011): State: TIMED_WAITING Blocked count: 0 Waited count: 521 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 4 on default port 44011): State: TIMED_WAITING Blocked count: 0 Waited count: 530 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 215 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 216 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data6/current/BP-719710545-172.17.0.2-1733592159624): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data5/current/BP-719710545-172.17.0.2-1733592159624): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (java.util.concurrent.ThreadPoolExecutor$Worker@3fcc5d6[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 
(FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 236 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 17 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:58133): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 235 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 239 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 259 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 240 (SyncThread:0): State: WAITING Blocked count: 21 Waited count: 746 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16227d79 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 241 (ProcessThread(sid:0 cport:58133):): State: WAITING Blocked count: 3 Waited count: 858 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70488d60 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 242 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 890 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1bcb33eb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 243 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4d63ad24 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 342 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 25 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test-SendThread(127.0.0.1:58133)): State: RUNNABLE Blocked count: 23 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 258 (Time-limited test-EventThread): State: WAITING Blocked count: 3 Waited count: 61 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3b622adb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 259 (NIOWorkerThread-2): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (NIOWorkerThread-3): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-4): State: WAITING Blocked count: 4 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (zk-event-processor-pool-0): State: WAITING Blocked count: 35 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@d3c757a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-5): State: WAITING Blocked count: 0 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-6): State: WAITING Blocked count: 2 Waited count: 134 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 
(NIOWorkerThread-9): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-10): State: WAITING Blocked count: 0 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-11): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-12): State: WAITING Blocked count: 5 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-13): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-14): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-15): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36895): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5d66f374 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 276 
(RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895): State: WAITING Blocked count: 56 Waited count: 216 Waiting on java.util.concurrent.Semaphore$NonfairSync@4987d9f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895): State: WAITING Blocked count: 245 Waited count: 931 Waiting on java.util.concurrent.Semaphore$NonfairSync@2e86bb25 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 278 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36895): State: WAITING Blocked count: 108 Waited count: 5536 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@40ff8c2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36895): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a683a12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36895): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a683a12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=36895): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6ddbde45 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=36895): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@769f1f48 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=36895): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4a3d536d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=36895): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@216e4892 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 288 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 310 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 332 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 30 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 286 (M:0;5c70517e8267:36895): State: TIMED_WAITING Blocked count: 6 Waited count: 2737 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$962/0x00007fc828f1e760.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 355 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (master/5c70517e8267:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (master/5c70517e8267:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (org.apache.hadoop.hdfs.PeerCache@55edb415): State: TIMED_WAITING Blocked count: 0 Waited count: 172 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 378 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5130 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 393 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 46 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 394 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 114 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 405 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 68 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6fe0c059 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 416 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 404 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 51210 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 425 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 27 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 426 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 21 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 447 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@381e8fab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 471 (regionserver/5c70517e8267:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4d5e888d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 472 (regionserver/5c70517e8267:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@24557b77 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 473 (regionserver/5c70517e8267:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2bf2d535 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 507 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 518 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 520 (region-location-0): State: WAITING Blocked count: 9 Waited count: 14 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@675d7437 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 550 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 51014 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 559 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 596 (region-location-1): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@675d7437 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 597 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@675d7437 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 598 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@675d7437 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1008 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 424 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1072 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1100 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1110 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 60 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6c19f8ad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1169 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1170 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1525 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@6069d5d5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2045 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3075 (region-location-4): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@675d7437 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5073 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5074 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5075 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9051 (AsyncFSWAL-1-hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/MasterData-prefix:5c70517e8267,36895,1733592164285): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4aa8b863 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9062 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
2024-12-07T17:31:43,253 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-07T17:32:13,253 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-07T17:32:19,212 DEBUG [M:0;5c70517e8267:36895 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-07T17:32:19,212 WARN [M:0;5c70517e8267:36895 {}] region.MasterRegion(134): Failed to close region
org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3722, WAL system stuck?
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:883) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1758) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1285) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:603) ~[classes/:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3722, WAL system stuck?
    at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) ~[classes/:?]
    ... 20 more
2024-12-07T17:32:19,213 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(163): normal close failed, try recover
java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.ensureWritable(int)" because "this.buf" is null
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.writeInt(FanOutOneBlockAsyncDFSOutput.java:396) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.lambda$writeWALTrailerAndMagic$3(AsyncProtobufLogWriter.java:243) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALMetadata(AsyncProtobufLogWriter.java:201) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALTrailerAndMagic(AsyncProtobufLogWriter.java:236) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter.writeWALTrailer(AbstractProtobufLogWriter.java:252) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:160) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:732) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-07T17:32:19,215 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils
2024-12-07T17:32:19,215 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease()
2024-12-07T17:32:19,215 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/MasterData/WALs/5c70517e8267,36895,1733592164285/5c70517e8267%2C36895%2C1733592164285.1733592166119
2024-12-07T17:32:19,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/MasterData/WALs/5c70517e8267,36895,1733592164285/5c70517e8267%2C36895%2C1733592164285.1733592166119 after 0ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:610) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:164) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:732) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-07T17:32:19,216 WARN [Close-WAL-Writer-0 {}] wal.AsyncFSWAL(734): close old writer failed.
java.io.InterruptedIOException: Operation cancelled
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.checkIfCancelled(RecoverLeaseFSUtils.java:269) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:159) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:610) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:164) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:732) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-07T17:32:19,216 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/MasterData/WALs/5c70517e8267,36895,1733592164285/5c70517e8267%2C36895%2C1733592164285.1733592166119
2024-12-07T17:32:19,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/MasterData/WALs/5c70517e8267,36895,1733592164285/5c70517e8267%2C36895%2C1733592164285.1733592166119 after 0ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;5c70517e8267:36895 221 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 15 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@1a8a2143 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 17 Waited count: 23 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 31 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 28 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d76ba68 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5821 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 59 Waiting on java.util.concurrent.CountDownLatch$Sync@121ef4b6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12217 Waited count: 12823 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 12 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@6f96b8c0 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@75bfaa0 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@263e077f): State: TIMED_WAITING Blocked count: 0 Waited count: 1159 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp2988114-37): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp2988114-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp2988114-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp2988114-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp2988114-41-acceptor-0@2ab2bdbc-ServerConnector@2d3d9b09{HTTP/1.1, (http/1.1)}{localhost:44973}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp2988114-42): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp2988114-43): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp2988114-44): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-4898edba-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 23 Waited count: 3047 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41889df6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 33765): State: TIMED_WAITING Blocked count: 1 Waited 
count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@1f645805): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 193 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@40205119): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 193 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 57271 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 0 Waited count: 1503 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@488942f0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 33765): State: TIMED_WAITING Blocked count: 73 Waited count: 2341 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 33765): State: TIMED_WAITING Blocked count: 71 Waited count: 2337 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 33765): State: TIMED_WAITING Blocked count: 85 Waited count: 2353 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 33765): State: TIMED_WAITING Blocked count: 71 Waited count: 2336 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 33765): State: TIMED_WAITING Blocked count: 64 Waited count: 2340 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@34326363): State: TIMED_WAITING Blocked count: 0 Waited count: 290 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@21d9c23a): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2b27bce4): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6f79ee98): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(2117026572)): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1029884707-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1029884707-88-acceptor-0@4bff0392-ServerConnector@69faf5ec{HTTP/1.1, (http/1.1)}{localhost:46687}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1029884707-89): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1029884707-90): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-397e9e71-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@59cc1837): State: TIMED_WAITING Blocked count: 0 Waited count: 1155 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 34995): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 340 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6e6dadab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-719710545-172.17.0.2-1733592159624 heartbeating to localhost/127.0.0.1:33765): State: TIMED_WAITING Blocked count: 1343 Waited count: 1542 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2bf238a5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 34995): State: TIMED_WAITING Blocked count: 0 Waited count: 588 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 34995): State: TIMED_WAITING Blocked count: 0 Waited count: 584 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 34995): State: TIMED_WAITING Blocked count: 0 Waited count: 588 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 34995): State: TIMED_WAITING Blocked count: 0 Waited count: 585 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 34995): State: TIMED_WAITING Blocked count: 0 Waited count: 583 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (IPC Client (1173471231) connection to localhost/127.0.0.1:33765 from jenkins): State: TIMED_WAITING Blocked count: 1482 Waited count: 1482 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 119 (IPC Parameter Sending Thread for localhost/127.0.0.1:33765): State: TIMED_WAITING Blocked count: 0 Waited count: 2079 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp2057355987-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 
(qtp2057355987-122-acceptor-0@2ffac974-ServerConnector@7ad86901{HTTP/1.1, (http/1.1)}{localhost:33865}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp2057355987-123): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp2057355987-124): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-6bc79822-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@46df0e6a): State: TIMED_WAITING Blocked count: 0 Waited count: 1154 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 131 (IPC Server idle connection scanner for port 46497): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 133 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (Command processor): State: WAITING Blocked count: 0 Waited count: 365 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@74e4858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 137 (BP-719710545-172.17.0.2-1733592159624 heartbeating to localhost/127.0.0.1:33765): State: TIMED_WAITING Blocked count: 1353 Waited count: 1549 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@42778ec6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 132 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 129 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 140 (IPC Server handler 0 on default port 46497): State: TIMED_WAITING Blocked count: 0 Waited count: 578 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 1 on default port 46497): State: TIMED_WAITING Blocked count: 0 Waited count: 585 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 2 on default port 46497): State: TIMED_WAITING Blocked count: 0 Waited count: 592 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 144 (IPC Server handler 3 on default port 46497): State: TIMED_WAITING Blocked count: 0 Waited count: 578 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 145 (IPC Server handler 4 on default port 46497): State: TIMED_WAITING Blocked count: 0 Waited count: 578 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 157 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data1)): State: TIMED_WAITING Blocked count: 4 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 159 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data3)): State: TIMED_WAITING Blocked count: 14 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 160 (qtp337563427-160): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc828428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (qtp337563427-163-acceptor-0@16ac471f-ServerConnector@950ceea{HTTP/1.1, (http/1.1)}{localhost:33521}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data4)): State: TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 161 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data2)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (qtp337563427-164): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (qtp337563427-167): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 170 (Session-HouseKeeper-63a0eda1-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data2/current/BP-719710545-172.17.0.2-1733592159624): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data3/current/BP-719710545-172.17.0.2-1733592159624): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 178 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data4/current/BP-719710545-172.17.0.2-1733592159624): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data1/current/BP-719710545-172.17.0.2-1733592159624): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 188 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 189 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 (java.util.concurrent.ThreadPoolExecutor$Worker@113e5a5d[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 195 (java.util.concurrent.ThreadPoolExecutor$Worker@36c9a67f[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@41b5ad5e): State: TIMED_WAITING Blocked count: 0 Waited count: 1153 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 200 (IPC Server idle connection scanner for port 44011): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 202 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (Command processor): State: WAITING Blocked count: 3 Waited count: 362 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3971fa04 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) 
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 206 (BP-719710545-172.17.0.2-1733592159624 heartbeating to localhost/127.0.0.1:33765): State: TIMED_WAITING Blocked count: 1316 Waited count: 1519 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ceeca3c): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 198 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 208 (IPC Server handler 0 on default port 44011): State: TIMED_WAITING Blocked count: 0 Waited count: 577 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 209 (IPC Server handler 1 on default port 44011): State: TIMED_WAITING Blocked count: 0 Waited count: 580 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 210 (IPC Server handler 2 on default port 44011): State: TIMED_WAITING Blocked count: 0 Waited count: 578 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 3 on default port 44011): State: TIMED_WAITING Blocked count: 0 Waited count: 581 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 4 on default port 44011): State: TIMED_WAITING Blocked count: 0 Waited count: 598 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 215 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 216 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data6/current/BP-719710545-172.17.0.2-1733592159624): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data5/current/BP-719710545-172.17.0.2-1733592159624): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (java.util.concurrent.ThreadPoolExecutor$Worker@3fcc5d6[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 
(FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 236 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 17 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:58133): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 235 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 239 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 289 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 240 (SyncThread:0): State: WAITING Blocked count: 21 Waited count: 750 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16227d79 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 241 (ProcessThread(sid:0 cport:58133):): State: WAITING Blocked count: 3 Waited count: 862 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70488d60 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 242 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 894 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1bcb33eb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 243 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4d63ad24 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 370 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 25 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test-SendThread(127.0.0.1:58133)): State: RUNNABLE Blocked count: 23 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 258 (Time-limited test-EventThread): State: WAITING Blocked count: 3 Waited count: 61 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3b622adb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 259 (NIOWorkerThread-2): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (NIOWorkerThread-3): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-4): State: WAITING Blocked count: 4 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (zk-event-processor-pool-0): State: WAITING Blocked count: 35 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@d3c757a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-5): State: WAITING Blocked count: 0 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-6): State: WAITING Blocked count: 2 Waited count: 135 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 
(NIOWorkerThread-9): State: WAITING Blocked count: 1 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-10): State: WAITING Blocked count: 0 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-11): State: WAITING Blocked count: 1 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-12): State: WAITING Blocked count: 5 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-13): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-14): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-15): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48afd8fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36895): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@5d66f374 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 276 
(RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36895): State: WAITING Blocked count: 56 Waited count: 216 Waiting on java.util.concurrent.Semaphore$NonfairSync@4987d9f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36895): State: WAITING Blocked count: 245 Waited count: 931 Waiting on java.util.concurrent.Semaphore$NonfairSync@2e86bb25 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 278 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36895): State: WAITING Blocked count: 108 Waited count: 5536 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@40ff8c2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36895): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a683a12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36895): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2a683a12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=36895): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6ddbde45 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=36895): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@769f1f48 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=36895): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4a3d536d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=36895): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@216e4892 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 288 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 310 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 332 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 30 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 286 (M:0;5c70517e8267:36895): State: TIMED_WAITING Blocked count: 6 Waited count: 2738 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1011) app//org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown(AbstractFSWALProvider.java:184) app//org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:272) app//org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140) app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206) app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1758) app//org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1285) app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:603) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (master/5c70517e8267:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (master/5c70517e8267:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (org.apache.hadoop.hdfs.PeerCache@55edb415): State: TIMED_WAITING Blocked count: 0 Waited count: 192 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 378 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5729 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 393 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 46 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 394 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 114 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 405 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 68 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6fe0c059 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 416 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 404 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 57211 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 425 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 27 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 426 
(RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 21 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 447 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@381e8fab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 471 (regionserver/5c70517e8267:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4d5e888d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 472 (regionserver/5c70517e8267:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@24557b77 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 473 (regionserver/5c70517e8267:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2bf2d535 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 507 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 518 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 520 (region-location-0): State: WAITING Blocked count: 9 Waited count: 14 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@675d7437 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 550 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 57015 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 559 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 13 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 596 (region-location-1): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@675d7437 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 597 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@675d7437 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 598 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@675d7437 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1008 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 430 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1072 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1100 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1110 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 60 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6c19f8ad Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1169 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1170 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1525 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@6069d5d5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2045 
(RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3075 (region-location-4): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@675d7437 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5073 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5074 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5075 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9051 (AsyncFSWAL-1-hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/MasterData-prefix:5c70517e8267,36895,1733592164285): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4aa8b863 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 9062 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 27
Stack:
  java.base@17.0.11/java.lang.Object.wait(Native Method)
  java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
  java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 9063 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 4
Stack:
  java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
  java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
  java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
  java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 9068 (WAL-Shutdown-0): State: TIMED_WAITING Blocked count: 0 Waited count: 2
Stack:
  java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
  java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
  java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1464)
  app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doShutdown(AsyncFSWAL.java:793)
  app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:995)
  app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:990)
  java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 9069 (Close-WAL-Writer-0): State: TIMED_WAITING Blocked count: 1 Waited count: 2
Stack:
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:166)
  app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96)
  app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722)
  app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735)
  app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL$$Lambda$1129/0x00007fc829161f70.run(Unknown Source)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
  java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
2024-12-07T17:32:23,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=1 on file=hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/MasterData/WALs/5c70517e8267,36895,1733592164285/5c70517e8267%2C36895%2C1733592164285.1733592166119 after 4000ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-07T17:32:24,213 ERROR [WAL-Shutdown-0 {}] wal.AsyncFSWAL(794): We have waited 5 seconds but the close of async writer doesn't complete. Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.async.wait.on.shutdown.seconds"
2024-12-07T17:32:24,213 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-07T17:32:24,213 INFO [M:0;5c70517e8267:36895 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-12-07T17:32:24,214 INFO [M:0;5c70517e8267:36895 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:36895
2024-12-07T17:32:24,215 DEBUG [M:0;5c70517e8267:36895 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/5c70517e8267,36895,1733592164285 already deleted, retry=false
2024-12-07T17:32:24,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33765/user/jenkins/test-data/5986ae82-8fa6-a93a-7951-d8c9e24089bc/MasterData/WALs/5c70517e8267,36895,1733592164285/5c70517e8267%2C36895%2C1733592164285.1733592166119
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 12 more
2024-12-07T17:32:24,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-07T17:32:24,317 INFO [M:0;5c70517e8267:36895 {}] regionserver.HRegionServer(1307): Exiting; stopping=5c70517e8267,36895,1733592164285; zookeeper connection closed.
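The ERROR at 17:32:24,213 above names "hbase.wal.async.wait.on.shutdown.seconds" as the knob behind the 5-second wait on the async WAL writer close. As a minimal, hedged sketch only (not taken from this test run; the class name and the 30-second value are invented for illustration), that property can be raised on an HBase Configuration before whatever starts the cluster reads it:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalShutdownWaitSketch {
  public static void main(String[] args) {
    // Illustrative only: raise the async-WAL shutdown wait named in the ERROR above.
    // The log shows a 5-second wait being exceeded; 30 here is an arbitrary example value.
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.wal.async.wait.on.shutdown.seconds", 30);
    System.out.println("hbase.wal.async.wait.on.shutdown.seconds = "
        + conf.getInt("hbase.wal.async.wait.on.shutdown.seconds", 5));
  }
}

A longer wait would only paper over what the surrounding entries suggest here, namely that the filesystem had already been closed while the WAL writer was still shutting down, which is consistent with the "Filesystem closed" IOExceptions thrown during the lease-recovery attempts above.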
2024-12-07T17:32:24,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36895-0x1006d80dcf90000, quorum=127.0.0.1:58133, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-07T17:32:24,321 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@767cd31a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-07T17:32:24,321 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@950ceea{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-07T17:32:24,321 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-07T17:32:24,321 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69e2404e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-07T17:32:24,322 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7f4d5ab4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop.log.dir/,STOPPED}
2024-12-07T17:32:24,324 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-07T17:32:24,324 WARN [BP-719710545-172.17.0.2-1733592159624 heartbeating to localhost/127.0.0.1:33765 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-07T17:32:24,324 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-07T17:32:24,324 WARN [BP-719710545-172.17.0.2-1733592159624 heartbeating to localhost/127.0.0.1:33765 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-719710545-172.17.0.2-1733592159624 (Datanode Uuid 846535b3-9b1c-4b8c-b76b-292d5a3f9bc2) service to localhost/127.0.0.1:33765
2024-12-07T17:32:24,326 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data5/current/BP-719710545-172.17.0.2-1733592159624 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T17:32:24,326 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data6/current/BP-719710545-172.17.0.2-1733592159624 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T17:32:24,327 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-07T17:32:24,329 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4f5b39a8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-07T17:32:24,329 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7ad86901{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-07T17:32:24,329 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-07T17:32:24,329 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@96ca12c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-07T17:32:24,329 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@a110049{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop.log.dir/,STOPPED}
2024-12-07T17:32:24,331 WARN [BP-719710545-172.17.0.2-1733592159624 heartbeating to localhost/127.0.0.1:33765 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-07T17:32:24,331 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-07T17:32:24,331 WARN [BP-719710545-172.17.0.2-1733592159624 heartbeating to localhost/127.0.0.1:33765 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-719710545-172.17.0.2-1733592159624 (Datanode Uuid 49b33b62-4bc1-4f0f-8c4e-a12d87d39afa) service to localhost/127.0.0.1:33765
2024-12-07T17:32:24,331 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-07T17:32:24,331 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data3/current/BP-719710545-172.17.0.2-1733592159624 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T17:32:24,332 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data4/current/BP-719710545-172.17.0.2-1733592159624 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T17:32:24,332 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-07T17:32:24,335 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@133e419e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-07T17:32:24,336 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@69faf5ec{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-07T17:32:24,336 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-07T17:32:24,336 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@231bb61b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-07T17:32:24,336 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d5648e9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop.log.dir/,STOPPED}
2024-12-07T17:32:24,337 WARN [BP-719710545-172.17.0.2-1733592159624 heartbeating to localhost/127.0.0.1:33765 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-07T17:32:24,337 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-07T17:32:24,337 WARN [BP-719710545-172.17.0.2-1733592159624 heartbeating to localhost/127.0.0.1:33765 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-719710545-172.17.0.2-1733592159624 (Datanode Uuid 84e8f957-34ea-4a56-9f59-ce7d0ef2643a) service to localhost/127.0.0.1:33765
2024-12-07T17:32:24,337 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-07T17:32:24,338 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data1/current/BP-719710545-172.17.0.2-1733592159624 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T17:32:24,338 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/cluster_223401e8-ce05-d2e0-c4fb-30dc0c6a9c16/dfs/data/data2/current/BP-719710545-172.17.0.2-1733592159624 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T17:32:24,338 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-07T17:32:24,345 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7883a2cb{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-07T17:32:24,345 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2d3d9b09{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-07T17:32:24,345 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-07T17:32:24,345 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@343317a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-07T17:32:24,345 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a82d853{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/412bbba9-7faa-4247-78d3-ece6a960d14d/hadoop.log.dir/,STOPPED}
2024-12-07T17:32:24,358 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-12-07T17:32:24,624 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
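The last two lines come from MiniZooKeeperCluster and HBaseTestingUtility tearing the mini cluster down. For context, a rough, hedged sketch of the test-side lifecycle that produces this kind of startup/shutdown logging (the class name, table name and column family below are invented for illustration; the actual flaky test is not shown in this excerpt):

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public class MiniClusterLifecycleSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();  // starts mini ZK, mini DFS and an HBase master/regionserver
    try {
      // Hypothetical table just to exercise the cluster.
      util.createTable(TableName.valueOf("demo"), Bytes.toBytes("cf"));
    } finally {
      util.shutdownMiniCluster();  // on success ends with the "Minicluster is down" line seen above
    }
  }
}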